comparison src/share/vm/opto/library_call.cpp @ 12355:cefad50507d8
Merge with hs25-b53
author   Gilles Duboscq <duboscq@ssw.jku.at>
date     Fri, 11 Oct 2013 10:38:03 +0200
parents  29bdcf12457c
children 3213ba4d3dff
12058:ccb4f2af2319 | 12355:cefad50507d8 |
30 #include "oops/objArrayKlass.hpp" | 30 #include "oops/objArrayKlass.hpp" |
31 #include "opto/addnode.hpp" | 31 #include "opto/addnode.hpp" |
32 #include "opto/callGenerator.hpp" | 32 #include "opto/callGenerator.hpp" |
33 #include "opto/cfgnode.hpp" | 33 #include "opto/cfgnode.hpp" |
34 #include "opto/idealKit.hpp" | 34 #include "opto/idealKit.hpp" |
35 #include "opto/mathexactnode.hpp" | |
35 #include "opto/mulnode.hpp" | 36 #include "opto/mulnode.hpp" |
36 #include "opto/parse.hpp" | 37 #include "opto/parse.hpp" |
37 #include "opto/runtime.hpp" | 38 #include "opto/runtime.hpp" |
38 #include "opto/subnode.hpp" | 39 #include "opto/subnode.hpp" |
39 #include "prims/nativeLookup.hpp" | 40 #include "prims/nativeLookup.hpp" |
44 // Extend the set of intrinsics known to the runtime: | 45 // Extend the set of intrinsics known to the runtime: |
45 public: | 46 public: |
46 private: | 47 private: |
47 bool _is_virtual; | 48 bool _is_virtual; |
48 bool _is_predicted; | 49 bool _is_predicted; |
| 50 bool _does_virtual_dispatch; |
49 vmIntrinsics::ID _intrinsic_id; | 51 vmIntrinsics::ID _intrinsic_id; |
50 | 52 |
51 public: | 53 public: |
52 LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, vmIntrinsics::ID id) | 54 LibraryIntrinsic(ciMethod* m, bool is_virtual, bool is_predicted, bool does_virtual_dispatch, vmIntrinsics::ID id) |
53 : InlineCallGenerator(m), | 55 : InlineCallGenerator(m), |
54 _is_virtual(is_virtual), | 56 _is_virtual(is_virtual), |
55 _is_predicted(is_predicted), | 57 _is_predicted(is_predicted), |
| 58 _does_virtual_dispatch(does_virtual_dispatch), |
56 _intrinsic_id(id) | 59 _intrinsic_id(id) |
57 { | 60 { |
58 } | 61 } |
59 virtual bool is_intrinsic() const { return true; } | 62 virtual bool is_intrinsic() const { return true; } |
60 virtual bool is_virtual() const { return _is_virtual; } | 63 virtual bool is_virtual() const { return _is_virtual; } |
61 virtual bool is_predicted() const { return _is_predicted; } | 64 virtual bool is_predicted() const { return _is_predicted; } |
| 65 virtual bool does_virtual_dispatch() const { return _does_virtual_dispatch; } |
62 virtual JVMState* generate(JVMState* jvms); | 66 virtual JVMState* generate(JVMState* jvms); |
63 virtual Node* generate_predicate(JVMState* jvms); | 67 virtual Node* generate_predicate(JVMState* jvms); |
64 vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; } | 68 vmIntrinsics::ID intrinsic_id() const { return _intrinsic_id; } |
65 }; | 69 }; |
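
The new `_does_virtual_dispatch` flag is fixed at construction and exposed through a virtual accessor, so code holding only a base call-generator pointer can ask whether the intrinsic still dispatches on the receiver (as `_hashCode` and `_clone` do further down). A minimal standalone C++ sketch of the pattern, with illustrative names rather than HotSpot's real hierarchy:

    #include <cstdio>

    // Sketch: a capability flag set at construction, queried virtually.
    class CallGenerator {
    public:
      virtual ~CallGenerator() {}
      virtual bool does_virtual_dispatch() const { return false; }
    };

    class LibraryIntrinsicSketch : public CallGenerator {
      bool _does_virtual_dispatch;
    public:
      explicit LibraryIntrinsicSketch(bool does_virtual_dispatch)
        : _does_virtual_dispatch(does_virtual_dispatch) {}
      bool does_virtual_dispatch() const override {
        return _does_virtual_dispatch;
      }
    };

    int main() {
      LibraryIntrinsicSketch hash_code(/*does_virtual_dispatch=*/true);
      CallGenerator* cg = &hash_code;  // callers see only the base type
      std::printf("virtual dispatch: %s\n",
                  cg->does_virtual_dispatch() ? "yes" : "no");
      return 0;
    }
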
66 | 70 |
197 Node* round_double_node(Node* n); | 201 Node* round_double_node(Node* n); |
198 bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName); | 202 bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName); |
199 bool inline_math_native(vmIntrinsics::ID id); | 203 bool inline_math_native(vmIntrinsics::ID id); |
200 bool inline_trig(vmIntrinsics::ID id); | 204 bool inline_trig(vmIntrinsics::ID id); |
201 bool inline_math(vmIntrinsics::ID id); | 205 bool inline_math(vmIntrinsics::ID id); |
| 206 bool inline_math_mathExact(Node* math); |
| 207 bool inline_math_addExact(); |
202 bool inline_exp(); | 208 bool inline_exp(); |
203 bool inline_pow(); | 209 bool inline_pow(); |
204 void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName); | 210 void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName); |
205 bool inline_min_max(vmIntrinsics::ID id); | 211 bool inline_min_max(vmIntrinsics::ID id); |
206 Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y); | 212 Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y); |
211 // Generates the guards that check whether the result of | 217 // Generates the guards that check whether the result of |
212 // Unsafe.getObject should be recorded in an SATB log buffer. | 218 // Unsafe.getObject should be recorded in an SATB log buffer. |
213 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar); | 219 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar); |
214 bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile); | 220 bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile); |
215 bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static); | 221 bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static); |
| 222 static bool klass_needs_init_guard(Node* kls); |
216 bool inline_unsafe_allocate(); | 223 bool inline_unsafe_allocate(); |
217 bool inline_unsafe_copyMemory(); | 224 bool inline_unsafe_copyMemory(); |
218 bool inline_native_currentThread(); | 225 bool inline_native_currentThread(); |
219 #ifdef TRACE_HAVE_INTRINSICS | 226 #ifdef TRACE_HAVE_INTRINSICS |
220 bool inline_native_classID(); | 227 bool inline_native_classID(); |
349 return NULL; | 356 return NULL; |
350 } | 357 } |
351 } | 358 } |
352 | 359 |
353 bool is_predicted = false; | 360 bool is_predicted = false; |
| 361 bool does_virtual_dispatch = false; |
354 | 362 |
355 switch (id) { | 363 switch (id) { |
356 case vmIntrinsics::_compareTo: | 364 case vmIntrinsics::_compareTo: |
357 if (!SpecialStringCompareTo) return NULL; | 365 if (!SpecialStringCompareTo) return NULL; |
358 if (!Matcher::match_rule_supported(Op_StrComp)) return NULL; | 366 if (!Matcher::match_rule_supported(Op_StrComp)) return NULL; |
375 if (StubRoutines::unsafe_arraycopy() == NULL) return NULL; | 383 if (StubRoutines::unsafe_arraycopy() == NULL) return NULL; |
376 if (!InlineArrayCopy) return NULL; | 384 if (!InlineArrayCopy) return NULL; |
377 break; | 385 break; |
378 case vmIntrinsics::_hashCode: | 386 case vmIntrinsics::_hashCode: |
379 if (!InlineObjectHash) return NULL; | 387 if (!InlineObjectHash) return NULL; |
| 388 does_virtual_dispatch = true; |
380 break; | 389 break; |
381 case vmIntrinsics::_clone: | 390 case vmIntrinsics::_clone: |
| 391 does_virtual_dispatch = true; |
382 case vmIntrinsics::_copyOf: | 392 case vmIntrinsics::_copyOf: |
383 case vmIntrinsics::_copyOfRange: | 393 case vmIntrinsics::_copyOfRange: |
384 if (!InlineObjectCopy) return NULL; | 394 if (!InlineObjectCopy) return NULL; |
385 // These also use the arraycopy intrinsic mechanism: | 395 // These also use the arraycopy intrinsic mechanism: |
386 if (!InlineArrayCopy) return NULL; | 396 if (!InlineArrayCopy) return NULL; |
493 | 503 |
494 case vmIntrinsics::_updateCRC32: | 504 case vmIntrinsics::_updateCRC32: |
495 case vmIntrinsics::_updateBytesCRC32: | 505 case vmIntrinsics::_updateBytesCRC32: |
496 case vmIntrinsics::_updateByteBufferCRC32: | 506 case vmIntrinsics::_updateByteBufferCRC32: |
497 if (!UseCRC32Intrinsics) return NULL; | 507 if (!UseCRC32Intrinsics) return NULL; |
| 508 break; |
| 509 |
| 510 case vmIntrinsics::_addExact: |
| 511 if (!Matcher::match_rule_supported(Op_AddExactI)) { |
| 512 return NULL; |
| 513 } |
| 514 if (!UseMathExactIntrinsics) { |
| 515 return NULL; |
| 516 } |
498 break; | 517 break; |
499 | 518 |
500 default: | 519 default: |
501 assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility"); | 520 assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility"); |
502 assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?"); | 521 assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?"); |
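
Returning NULL from this factory is the normal "no intrinsic" answer: the new `_addExact` case declines unless the platform matcher can implement `Op_AddExactI` and `-XX:+UseMathExactIntrinsics` is on, and the call then compiles as ordinary bytecode. A hedged sketch of that two-level gate; `matcher_supports` and the flag constant are illustrative stand-ins, not HotSpot APIs:

    #include <cstdio>

    enum Opcode { Op_AddExactI_sketch };

    // Stand-in for Matcher::match_rule_supported(): platform-dependent.
    static bool matcher_supports(Opcode) { return true; }

    // Stand-in for the -XX:+UseMathExactIntrinsics product flag.
    static const bool use_math_exact_intrinsics = true;

    struct Intrinsic { const char* name; };

    // NULL means "fall back to the regular call path", not an error.
    static Intrinsic* make_add_exact() {
      if (!matcher_supports(Op_AddExactI_sketch)) return nullptr;
      if (!use_math_exact_intrinsics)             return nullptr;
      static Intrinsic add_exact = { "_addExact" };
      return &add_exact;
    }

    int main() {
      Intrinsic* i = make_add_exact();
      std::printf("%s\n", i ? i->name : "no intrinsic, compile the bytecode");
      return 0;
    }
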
526 // -XX:-InlineUnsafeOps disables natives from the Unsafe class. | 545 // -XX:-InlineUnsafeOps disables natives from the Unsafe class. |
527 if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) { | 546 if (m->holder()->name() == ciSymbol::sun_misc_Unsafe()) { |
528 if (!InlineUnsafeOps) return NULL; | 547 if (!InlineUnsafeOps) return NULL; |
529 } | 548 } |
530 | 549 |
531 return new LibraryIntrinsic(m, is_virtual, is_predicted, (vmIntrinsics::ID) id); | 550 return new LibraryIntrinsic(m, is_virtual, is_predicted, does_virtual_dispatch, (vmIntrinsics::ID) id); |
532 } | 551 } |
533 | 552 |
534 //----------------------register_library_intrinsics----------------------- | 553 //----------------------register_library_intrinsics----------------------- |
535 // Initialize this file's data structures, for each Compile instance. | 554 // Initialize this file's data structures, for each Compile instance. |
536 void Compile::register_library_intrinsics() { | 555 void Compile::register_library_intrinsics() { |
540 JVMState* LibraryIntrinsic::generate(JVMState* jvms) { | 559 JVMState* LibraryIntrinsic::generate(JVMState* jvms) { |
541 LibraryCallKit kit(jvms, this); | 560 LibraryCallKit kit(jvms, this); |
542 Compile* C = kit.C; | 561 Compile* C = kit.C; |
543 int nodes = C->unique(); | 562 int nodes = C->unique(); |
544 #ifndef PRODUCT | 563 #ifndef PRODUCT |
545 if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) { | 564 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { |
546 char buf[1000]; | 565 char buf[1000]; |
547 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); | 566 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); |
548 tty->print_cr("Intrinsic %s", str); | 567 tty->print_cr("Intrinsic %s", str); |
549 } | 568 } |
550 #endif | 569 #endif |
551 ciMethod* callee = kit.callee(); | 570 ciMethod* callee = kit.callee(); |
552 const int bci = kit.bci(); | 571 const int bci = kit.bci(); |
553 | 572 |
554 // Try to inline the intrinsic. | 573 // Try to inline the intrinsic. |
555 if (kit.try_to_inline()) { | 574 if (kit.try_to_inline()) { |
556 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { | 575 if (C->print_intrinsics() || C->print_inlining()) { |
557 C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); | 576 C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); |
558 } | 577 } |
559 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); | 578 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); |
560 if (C->log()) { | 579 if (C->log()) { |
561 C->log()->elem("intrinsic id='%s'%s nodes='%d'", | 580 C->log()->elem("intrinsic id='%s'%s nodes='%d'", |
567 kit.push_result(); | 586 kit.push_result(); |
568 return kit.transfer_exceptions_into_jvms(); | 587 return kit.transfer_exceptions_into_jvms(); |
569 } | 588 } |
570 | 589 |
571 // The intrinsic bailed out | 590 // The intrinsic bailed out |
572 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { | 591 if (C->print_intrinsics() || C->print_inlining()) { |
573 if (jvms->has_method()) { | 592 if (jvms->has_method()) { |
574 // Not a root compile. | 593 // Not a root compile. |
575 const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)"; | 594 const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)"; |
576 C->print_inlining(callee, jvms->depth() - 1, bci, msg); | 595 C->print_inlining(callee, jvms->depth() - 1, bci, msg); |
577 } else { | 596 } else { |
589 LibraryCallKit kit(jvms, this); | 608 LibraryCallKit kit(jvms, this); |
590 Compile* C = kit.C; | 609 Compile* C = kit.C; |
591 int nodes = C->unique(); | 610 int nodes = C->unique(); |
592 #ifndef PRODUCT | 611 #ifndef PRODUCT |
593 assert(is_predicted(), "sanity"); | 612 assert(is_predicted(), "sanity"); |
594 if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) { | 613 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { |
595 char buf[1000]; | 614 char buf[1000]; |
596 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); | 615 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); |
597 tty->print_cr("Predicate for intrinsic %s", str); | 616 tty->print_cr("Predicate for intrinsic %s", str); |
598 } | 617 } |
599 #endif | 618 #endif |
600 ciMethod* callee = kit.callee(); | 619 ciMethod* callee = kit.callee(); |
601 const int bci = kit.bci(); | 620 const int bci = kit.bci(); |
602 | 621 |
603 Node* slow_ctl = kit.try_to_predicate(); | 622 Node* slow_ctl = kit.try_to_predicate(); |
604 if (!kit.failing()) { | 623 if (!kit.failing()) { |
605 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { | 624 if (C->print_intrinsics() || C->print_inlining()) { |
606 C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); | 625 C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); |
607 } | 626 } |
608 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); | 627 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); |
609 if (C->log()) { | 628 if (C->log()) { |
610 C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'", | 629 C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'", |
614 } | 633 } |
615 return slow_ctl; // Could be NULL if the check folds. | 634 return slow_ctl; // Could be NULL if the check folds. |
616 } | 635 } |
617 | 636 |
618 // The intrinsic bailed out | 637 // The intrinsic bailed out |
619 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { | 638 if (C->print_intrinsics() || C->print_inlining()) { |
620 if (jvms->has_method()) { | 639 if (jvms->has_method()) { |
621 // Not a root compile. | 640 // Not a root compile. |
622 const char* msg = "failed to generate predicate for intrinsic"; | 641 const char* msg = "failed to generate predicate for intrinsic"; |
623 C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg); | 642 C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg); |
624 } else { | 643 } else { |
664 case vmIntrinsics::_dlog10: | 683 case vmIntrinsics::_dlog10: |
665 case vmIntrinsics::_dpow: return inline_math_native(intrinsic_id()); | 684 case vmIntrinsics::_dpow: return inline_math_native(intrinsic_id()); |
666 | 685 |
667 case vmIntrinsics::_min: | 686 case vmIntrinsics::_min: |
668 case vmIntrinsics::_max: return inline_min_max(intrinsic_id()); | 687 case vmIntrinsics::_max: return inline_min_max(intrinsic_id()); |
| 688 |
| 689 case vmIntrinsics::_addExact: return inline_math_addExact(); |
669 | 690 |
670 case vmIntrinsics::_arraycopy: return inline_arraycopy(); | 691 case vmIntrinsics::_arraycopy: return inline_arraycopy(); |
671 | 692 |
672 case vmIntrinsics::_compareTo: return inline_string_compareTo(); | 693 case vmIntrinsics::_compareTo: return inline_string_compareTo(); |
673 case vmIntrinsics::_indexOf: return inline_string_indexOf(); | 694 case vmIntrinsics::_indexOf: return inline_string_indexOf(); |
1277 Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true))); | 1298 Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true))); |
1278 jint target_length = target_array->length(); | 1299 jint target_length = target_array->length(); |
1279 const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin)); | 1300 const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin)); |
1280 const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot); | 1301 const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, Type::OffsetBot); |
1281 | 1302 |
| 1303 // String.value field is known to be @Stable. |
| 1304 if (UseImplicitStableValues) { |
| 1305 target = cast_array_to_stable(target, target_type); |
| 1306 } |
| 1307 |
1282 IdealKit kit(this, false, true); | 1308 IdealKit kit(this, false, true); |
1283 #define __ kit. | 1309 #define __ kit. |
1284 Node* zero = __ ConI(0); | 1310 Node* zero = __ ConI(0); |
1285 Node* one = __ ConI(1); | 1311 Node* one = __ ConI(1); |
1286 Node* cache = __ ConI(cache_i); | 1312 Node* cache = __ ConI(cache_i); |
1900 } | 1926 } |
1901 | 1927 |
1902 //----------------------------inline_min_max----------------------------------- | 1928 //----------------------------inline_min_max----------------------------------- |
1903 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) { | 1929 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) { |
1904 set_result(generate_min_max(id, argument(0), argument(1))); | 1930 set_result(generate_min_max(id, argument(0), argument(1))); |
| 1931 return true; |
| 1932 } |
| 1933 |
| 1934 bool LibraryCallKit::inline_math_mathExact(Node* math) { |
| 1935 Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node)); |
| 1936 Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node)); |
| 1937 |
| 1938 Node* bol = _gvn.transform( new (C) BoolNode(flags, BoolTest::overflow) ); |
| 1939 IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN); |
| 1940 Node* fast_path = _gvn.transform( new (C) IfFalseNode(check)); |
| 1941 Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) ); |
| 1942 |
| 1943 { |
| 1944 PreserveJVMState pjvms(this); |
| 1945 PreserveReexecuteState preexecs(this); |
| 1946 jvms()->set_should_reexecute(true); |
| 1947 |
| 1948 set_control(slow_path); |
| 1949 set_i_o(i_o()); |
| 1950 |
| 1951 uncommon_trap(Deoptimization::Reason_intrinsic, |
| 1952 Deoptimization::Action_none); |
| 1953 } |
| 1954 |
| 1955 set_control(fast_path); |
| 1956 set_result(result); |
| 1957 return true; |
| 1958 } |
| 1959 |
| 1960 bool LibraryCallKit::inline_math_addExact() { |
| 1961 Node* arg1 = argument(0); |
| 1962 Node* arg2 = argument(1); |
| 1963 |
| 1964 Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) ); |
| 1965 if (add->Opcode() == Op_AddExactI) { |
| 1966 return inline_math_mathExact(add); |
| 1967 } else { |
| 1968 set_result(add); |
| 1969 } |
1905 return true; | 1970 return true; |
1906 } | 1971 } |
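
`inline_math_addExact` wires the two arguments into an `AddExactINode`; if GVN folds the node into a plain add (no overflow possible), the result is used directly, otherwise `inline_math_mathExact` projects out the value and the overflow flag, branches on `BoolTest::overflow` with a strongly-not-taken probability, and turns the overflow arm into an uncommon trap that re-executes the bytecode in the interpreter, where `Math.addExact` then throws `ArithmeticException`. A hedged C++ sketch of the code shape this produces, with `deoptimize_and_reexecute` standing in for the trap:

    #include <cstdio>
    #include <stdexcept>

    // Stand-in for uncommon_trap(Reason_intrinsic, Action_none): the VM
    // deoptimizes and re-runs the bytecode, which raises the exception.
    [[noreturn]] static void deoptimize_and_reexecute() {
      throw std::overflow_error("integer overflow");
    }

    // One add that also yields an overflow bit (AddExactINode plus its
    // flags projection), then an unlikely branch (PROB_UNLIKELY_MAG(3)).
    static int add_exact(int a, int b) {
      int result;
      if (__builtin_add_overflow(a, b, &result)) {  // GCC/Clang builtin
        deoptimize_and_reexecute();                 // slow path
      }
      return result;                                // fast path
    }

    int main() {
      std::printf("%d\n", add_exact(40, 2));
      try {
        add_exact(2147483647, 1);
      } catch (const std::overflow_error&) {
        std::puts("overflow path taken");
      }
      return 0;
    }
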
1907 | 1972 |
1908 Node* | 1973 Node* |
1909 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) { | 1974 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) { |
2291 // contraint in place. | 2356 // contraint in place. |
2292 if (sharpened_klass != NULL && sharpened_klass->is_loaded()) { | 2357 if (sharpened_klass != NULL && sharpened_klass->is_loaded()) { |
2293 const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass); | 2358 const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass); |
2294 | 2359 |
2295 #ifndef PRODUCT | 2360 #ifndef PRODUCT |
2296 if (PrintIntrinsics || PrintInlining || PrintOptoInlining) { | 2361 if (C->print_intrinsics() || C->print_inlining()) { |
2297 tty->print(" from base type: "); adr_type->dump(); | 2362 tty->print(" from base type: "); adr_type->dump(); |
2298 tty->print(" sharpened value: "); tjp->dump(); | 2363 tty->print(" sharpened value: "); tjp->dump(); |
2299 } | 2364 } |
2300 #endif | 2365 #endif |
2301 // Sharpen the value type. | 2366 // Sharpen the value type. |
2753 // Execute transformation here to avoid barrier generation in such case. | 2818 // Execute transformation here to avoid barrier generation in such case. |
2754 if (_gvn.type(newval) == TypePtr::NULL_PTR) | 2819 if (_gvn.type(newval) == TypePtr::NULL_PTR) |
2755 newval = _gvn.makecon(TypePtr::NULL_PTR); | 2820 newval = _gvn.makecon(TypePtr::NULL_PTR); |
2756 | 2821 |
2757 // Reference stores need a store barrier. | 2822 // Reference stores need a store barrier. |
2758 pre_barrier(true /* do_load*/, | 2823 if (kind == LS_xchg) { |
2759 control(), base, adr, alias_idx, newval, value_type->make_oopptr(), | 2824 // If pre-barrier must execute before the oop store, old value will require do_load here. |
2760 NULL /* pre_val*/, | 2825 if (!can_move_pre_barrier()) { |
2761 T_OBJECT); | 2826 pre_barrier(true /* do_load*/, |
| 2827 control(), base, adr, alias_idx, newval, value_type->make_oopptr(), |
| 2828 NULL /* pre_val*/, |
| 2829 T_OBJECT); |
| 2830 } // Else move pre_barrier to use load_store value, see below. |
| 2831 } else if (kind == LS_cmpxchg) { |
| 2832 // Same as for newval above: |
| 2833 if (_gvn.type(oldval) == TypePtr::NULL_PTR) { |
| 2834 oldval = _gvn.makecon(TypePtr::NULL_PTR); |
| 2835 } |
| 2836 // The only known value which might get overwritten is oldval. |
| 2837 pre_barrier(false /* do_load */, |
| 2838 control(), NULL, NULL, max_juint, NULL, NULL, |
| 2839 oldval /* pre_val */, |
| 2840 T_OBJECT); |
| 2841 } else { |
| 2842 ShouldNotReachHere(); |
| 2843 } |
| 2844 |
2762 #ifdef _LP64 | 2845 #ifdef _LP64 |
2763 if (adr->bottom_type()->is_ptr_to_narrowoop()) { | 2846 if (adr->bottom_type()->is_ptr_to_narrowoop()) { |
2764 Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop())); | 2847 Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop())); |
2765 if (kind == LS_xchg) { | 2848 if (kind == LS_xchg) { |
2766 load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr, | 2849 load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr, |
2792 // main role is to prevent LoadStore nodes from being optimized away | 2875 // main role is to prevent LoadStore nodes from being optimized away |
2793 // when their results aren't used. | 2876 // when their results aren't used. |
2794 Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store)); | 2877 Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store)); |
2795 set_memory(proj, alias_idx); | 2878 set_memory(proj, alias_idx); |
2796 | 2879 |
| 2880 if (type == T_OBJECT && kind == LS_xchg) { |
| 2881 #ifdef _LP64 |
| 2882 if (adr->bottom_type()->is_ptr_to_narrowoop()) { |
| 2883 load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type())); |
| 2884 } |
| 2885 #endif |
| 2886 if (can_move_pre_barrier()) { |
| 2887 // Don't need to load pre_val. The old value is returned by load_store. |
| 2888 // The pre_barrier can execute after the xchg as long as no safepoint |
| 2889 // gets inserted between them. |
| 2890 pre_barrier(false /* do_load */, |
| 2891 control(), NULL, NULL, max_juint, NULL, NULL, |
| 2892 load_store /* pre_val */, |
| 2893 T_OBJECT); |
| 2894 } |
| 2895 } |
| 2896 |
2797 // Add the trailing membar surrounding the access | 2897 // Add the trailing membar surrounding the access |
2798 insert_mem_bar(Op_MemBarCPUOrder); | 2898 insert_mem_bar(Op_MemBarCPUOrder); |
2799 insert_mem_bar(Op_MemBarAcquire); | 2899 insert_mem_bar(Op_MemBarAcquire); |
2800 | |
2801 #ifdef _LP64 | |
2802 if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) { | |
2803 load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type())); | |
2804 } | |
2805 #endif | |
2806 | 2900 |
2807 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match"); | 2901 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match"); |
2808 set_result(load_store); | 2902 set_result(load_store); |
2809 return true; | 2903 return true; |
2810 } | 2904 } |
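
The restructured barrier code distinguishes the two load-store kinds for the G1 SATB pre-barrier. For `LS_cmpxchg` the only reference that can be overwritten is the caller-supplied `oldval`, so it is enqueued directly with `do_load` false. For `LS_xchg` the old value is produced by the exchange itself, so when `can_move_pre_barrier()` holds, the barrier runs after the swap on the returned value (decoded from a narrow oop first on 64-bit), saving a separate load. A sketch of the idea using `std::atomic`; `satb_enqueue` is a hypothetical stand-in for the pre-barrier's buffering logic:

    #include <atomic>
    #include <cstdio>

    struct Obj { int id; };

    // Hypothetical SATB hook: record the reference about to be overwritten
    // so concurrent marking still sees the snapshot-at-the-beginning value.
    static void satb_enqueue(Obj* pre_val) {
      std::printf("enqueue old value %d\n", pre_val->id);
    }

    // LS_xchg: the swap returns the old value, so the pre-barrier can run
    // afterwards on that result -- no extra load, provided no safepoint can
    // separate the two (the can_move_pre_barrier() case).
    static Obj* get_and_set(std::atomic<Obj*>& field, Obj* newval) {
      Obj* old = field.exchange(newval);
      if (old != nullptr) satb_enqueue(old);
      return old;
    }

    // LS_cmpxchg: the only value that might be overwritten is the expected
    // one, so enqueue oldval up front and skip loading the field.
    static bool compare_and_set(std::atomic<Obj*>& field,
                                Obj* oldval, Obj* newval) {
      if (oldval != nullptr) satb_enqueue(oldval);
      return field.compare_exchange_strong(oldval, newval);
    }

    int main() {
      Obj a{1}, b{2};
      std::atomic<Obj*> field{&a};
      compare_and_set(field, &a, &b);  // enqueues a, installs b
      get_and_set(field, &a);          // swaps a back in, enqueues b
      return 0;
    }
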
2890 fatal_unexpected_iid(id); | 2984 fatal_unexpected_iid(id); |
2891 return false; | 2985 return false; |
2892 } | 2986 } |
2893 } | 2987 } |
2894 | 2988 |
| 2989 bool LibraryCallKit::klass_needs_init_guard(Node* kls) { |
| 2990 if (!kls->is_Con()) { |
| 2991 return true; |
| 2992 } |
| 2993 const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr(); |
| 2994 if (klsptr == NULL) { |
| 2995 return true; |
| 2996 } |
| 2997 ciInstanceKlass* ik = klsptr->klass()->as_instance_klass(); |
| 2998 // don't need a guard for a klass that is already initialized |
| 2999 return !ik->is_initialized(); |
| 3000 } |
| 3001 |
2895 //----------------------------inline_unsafe_allocate--------------------------- | 3002 //----------------------------inline_unsafe_allocate--------------------------- |
2896 // public native Object sun.mics.Unsafe.allocateInstance(Class<?> cls); | 3003 // public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls); |
2897 bool LibraryCallKit::inline_unsafe_allocate() { | 3004 bool LibraryCallKit::inline_unsafe_allocate() { |
2898 if (callee()->is_static()) return false; // caller must have the capability! | 3005 if (callee()->is_static()) return false; // caller must have the capability! |
2899 | 3006 |
2900 null_check_receiver(); // null-check, then ignore | 3007 null_check_receiver(); // null-check, then ignore |
2901 Node* cls = null_check(argument(1)); | 3008 Node* cls = null_check(argument(1)); |
2903 | 3010 |
2904 Node* kls = load_klass_from_mirror(cls, false, NULL, 0); | 3011 Node* kls = load_klass_from_mirror(cls, false, NULL, 0); |
2905 kls = null_check(kls); | 3012 kls = null_check(kls); |
2906 if (stopped()) return true; // argument was like int.class | 3013 if (stopped()) return true; // argument was like int.class |
2907 | 3014 |
2908 // Note: The argument might still be an illegal value like | 3015 Node* test = NULL; |
2909 // Serializable.class or Object[].class. The runtime will handle it. | 3016 if (LibraryCallKit::klass_needs_init_guard(kls)) { |
2910 // But we must make an explicit check for initialization. | 3017 // Note: The argument might still be an illegal value like |
2911 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset())); | 3018 // Serializable.class or Object[].class. The runtime will handle it. |
2912 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler | 3019 // But we must make an explicit check for initialization. |
2913 // can generate code to load it as unsigned byte. | 3020 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset())); |
2914 Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN); | 3021 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler |
2915 Node* bits = intcon(InstanceKlass::fully_initialized); | 3022 // can generate code to load it as unsigned byte. |
2916 Node* test = _gvn.transform(new (C) SubINode(inst, bits)); | 3023 Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN); |
2917 // The 'test' is non-zero if we need to take a slow path. | 3024 Node* bits = intcon(InstanceKlass::fully_initialized); |
| 3025 test = _gvn.transform(new (C) SubINode(inst, bits)); |
| 3026 // The 'test' is non-zero if we need to take a slow path. |
| 3027 } |
2918 | 3028 |
2919 Node* obj = new_instance(kls, test); | 3029 Node* obj = new_instance(kls, test); |
2920 set_result(obj); | 3030 set_result(obj); |
2921 return true; | 3031 return true; |
2922 } | 3032 } |
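
`klass_needs_init_guard` lets `inline_unsafe_allocate` drop the initialization check entirely when the class is a compile-time constant already known to be initialized, in which case `test` stays NULL and `new_instance` gets no slow-path condition for that reason. Otherwise the guard is built as before: `_init_state` is loaded as an unsigned byte and `fully_initialized` is subtracted, so any non-zero result selects the slow path. A standalone sketch of both halves; the state values here are illustrative, not HotSpot's actual encoding:

    #include <cstdio>

    // Illustrative stand-in for InstanceKlass::_init_state.
    enum InitState : unsigned char { being_initialized = 3,
                                     fully_initialized = 4 };

    struct KlassSketch {
      InitState init_state;
      bool is_compile_time_constant;  // stands in for kls->is_Con()
    };

    // Compile-time decision: a constant, already-initialized klass needs
    // no runtime guard at all.
    static bool klass_needs_init_guard(const KlassSketch& k) {
      if (!k.is_compile_time_constant) return true;
      return k.init_state != fully_initialized;
    }

    // The emitted runtime test: unsigned-byte load, then a subtraction;
    // non-zero means "take the slow path" (initialize the class first).
    static bool init_guard_fails(const KlassSketch& k) {
      unsigned char inst = k.init_state;    // make_load(..., T_BOOLEAN)
      int test = inst - fully_initialized;  // SubINode
      return test != 0;
    }

    int main() {
      KlassSketch k = { being_initialized, /*constant=*/false };
      if (klass_needs_init_guard(k) && init_guard_fails(k))
        std::puts("slow path: run class initialization");
      else
        std::puts("fast path: allocate directly");
      return 0;
    }
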
3207 | 3317 |
3208 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr(); | 3318 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr(); |
3209 if (mirror_con == NULL) return false; // cannot happen? | 3319 if (mirror_con == NULL) return false; // cannot happen? |
3210 | 3320 |
3211 #ifndef PRODUCT | 3321 #ifndef PRODUCT |
3212 if (PrintIntrinsics || PrintInlining || PrintOptoInlining) { | 3322 if (C->print_intrinsics() || C->print_inlining()) { |
3213 ciType* k = mirror_con->java_mirror_type(); | 3323 ciType* k = mirror_con->java_mirror_type(); |
3214 if (k) { | 3324 if (k) { |
3215 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id())); | 3325 tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id())); |
3216 k->print_name(); | 3326 k->print_name(); |
3217 tty->cr(); | 3327 tty->cr(); |
3681 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call. | 3791 // Helper for hashCode and clone. Peeks inside the vtable to avoid a call. |
3682 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass, | 3792 Node* LibraryCallKit::generate_virtual_guard(Node* obj_klass, |
3683 RegionNode* slow_region) { | 3793 RegionNode* slow_region) { |
3684 ciMethod* method = callee(); | 3794 ciMethod* method = callee(); |
3685 int vtable_index = method->vtable_index(); | 3795 int vtable_index = method->vtable_index(); |
| 3796 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, |
| 3797 err_msg_res("bad index %d", vtable_index)); |
3686 // Get the Method* out of the appropriate vtable entry. | 3798 // Get the Method* out of the appropriate vtable entry. |
3687 int entry_offset = (InstanceKlass::vtable_start_offset() + | 3799 int entry_offset = (InstanceKlass::vtable_start_offset() + |
3688 vtable_index*vtableEntry::size()) * wordSize + | 3800 vtable_index*vtableEntry::size()) * wordSize + |
3689 vtableEntry::method_offset_in_bytes(); | 3801 vtableEntry::method_offset_in_bytes(); |
3690 Node* entry_addr = basic_plus_adr(obj_klass, entry_offset); | 3802 Node* entry_addr = basic_plus_adr(obj_klass, entry_offset); |
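
The new assert documents the invariant `generate_virtual_guard` relies on: `vtable_index` is either a genuine index or the `nonvirtual_vtable_index` sentinel, never garbage. The guard itself peeks at the receiver klass's vtable entry with plain base-plus-scaled-index arithmetic, sketched below with an invented layout (real offsets come from `InstanceKlass::vtable_start_offset()` and `vtableEntry`):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Method;  // opaque, as in the VM

    // Invented layout: a fixed header followed by an embedded vtable whose
    // entries each hold one Method* (vtableEntry is richer in reality).
    struct KlassSketch {
      char    header[64];
      Method* vtable[16];
    };

    // entry_addr = klass + vtable_start + index * entry_size + method_offset
    static Method** vtable_entry_addr(KlassSketch* k, int vtable_index) {
      uint8_t* base = reinterpret_cast<uint8_t*>(k);
      size_t offset = offsetof(KlassSketch, vtable)
                      + static_cast<size_t>(vtable_index) * sizeof(Method*);
      return reinterpret_cast<Method**>(base + offset);
    }

    int main() {
      KlassSketch k = {};
      std::printf("entry 3 lives %td bytes into the klass\n",
                  reinterpret_cast<uint8_t*>(vtable_entry_addr(&k, 3))
                  - reinterpret_cast<uint8_t*>(&k));
      return 0;
    }
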
3731 } else { | 3843 } else { |
3732 // hashCode and clone are not a miranda methods, | 3844 // hashCode and clone are not a miranda methods, |
3733 // so the vtable index is fixed. | 3845 // so the vtable index is fixed. |
3734 // No need to use the linkResolver to get it. | 3846 // No need to use the linkResolver to get it. |
3735 vtable_index = method->vtable_index(); | 3847 vtable_index = method->vtable_index(); |
| 3848 assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, |
| 3849 err_msg_res("bad index %d", vtable_index)); |
3736 } | 3850 } |
3737 slow_call = new(C) CallDynamicJavaNode(tf, | 3851 slow_call = new(C) CallDynamicJavaNode(tf, |
3738 SharedRuntime::get_resolve_virtual_call_stub(), | 3852 SharedRuntime::get_resolve_virtual_call_stub(), |
3739 method, vtable_index, bci()); | 3853 method, vtable_index, bci()); |
3740 } else { // neither virtual nor static: opt_virtual | 3854 } else { // neither virtual nor static: opt_virtual |
3895 // NOTE: This code must perform the same logic as JVM_GetCallerClass | 4009 // NOTE: This code must perform the same logic as JVM_GetCallerClass |
3896 // in that it must skip particular security frames and checks for | 4010 // in that it must skip particular security frames and checks for |
3897 // caller sensitive methods. | 4011 // caller sensitive methods. |
3898 bool LibraryCallKit::inline_native_Reflection_getCallerClass() { | 4012 bool LibraryCallKit::inline_native_Reflection_getCallerClass() { |
3899 #ifndef PRODUCT | 4013 #ifndef PRODUCT |
3900 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | 4014 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { |
3901 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass"); | 4015 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass"); |
3902 } | 4016 } |
3903 #endif | 4017 #endif |
3904 | 4018 |
3905 if (!jvms()->has_method()) { | 4019 if (!jvms()->has_method()) { |
3906 #ifndef PRODUCT | 4020 #ifndef PRODUCT |
3907 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | 4021 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { |
3908 tty->print_cr(" Bailing out because intrinsic was inlined at top level"); | 4022 tty->print_cr(" Bailing out because intrinsic was inlined at top level"); |
3909 } | 4023 } |
3910 #endif | 4024 #endif |
3911 return false; | 4025 return false; |
3912 } | 4026 } |
3926 break; | 4040 break; |
3927 case 1: | 4041 case 1: |
3928 // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass). | 4042 // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass). |
3929 if (!m->caller_sensitive()) { | 4043 if (!m->caller_sensitive()) { |
3930 #ifndef PRODUCT | 4044 #ifndef PRODUCT |
3931 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | 4045 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { |
3932 tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n); | 4046 tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n); |
3933 } | 4047 } |
3934 #endif | 4048 #endif |
3935 return false; // bail-out; let JVM_GetCallerClass do the work | 4049 return false; // bail-out; let JVM_GetCallerClass do the work |
3936 } | 4050 } |
3942 ciInstanceKlass* caller_klass = caller_jvms->method()->holder(); | 4056 ciInstanceKlass* caller_klass = caller_jvms->method()->holder(); |
3943 ciInstance* caller_mirror = caller_klass->java_mirror(); | 4057 ciInstance* caller_mirror = caller_klass->java_mirror(); |
3944 set_result(makecon(TypeInstPtr::make(caller_mirror))); | 4058 set_result(makecon(TypeInstPtr::make(caller_mirror))); |
3945 | 4059 |
3946 #ifndef PRODUCT | 4060 #ifndef PRODUCT |
3947 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | 4061 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { |
3948 tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth()); | 4062 tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth()); |
3949 tty->print_cr(" JVM state at this point:"); | 4063 tty->print_cr(" JVM state at this point:"); |
3950 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) { | 4064 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) { |
3951 ciMethod* m = jvms()->of_depth(i)->method(); | 4065 ciMethod* m = jvms()->of_depth(i)->method(); |
3952 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8()); | 4066 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8()); |
3958 break; | 4072 break; |
3959 } | 4073 } |
3960 } | 4074 } |
3961 | 4075 |
3962 #ifndef PRODUCT | 4076 #ifndef PRODUCT |
3963 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | 4077 if ((C->print_intrinsics() || C->print_inlining()) && Verbose) { |
3964 tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth()); | 4078 tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth()); |
3965 tty->print_cr(" JVM state at this point:"); | 4079 tty->print_cr(" JVM state at this point:"); |
3966 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) { | 4080 for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) { |
3967 ciMethod* m = jvms()->of_depth(i)->method(); | 4081 ciMethod* m = jvms()->of_depth(i)->method(); |
3968 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8()); | 4082 tty->print_cr(" %d) %s.%s", n, m->holder()->name()->as_utf8(), m->name()->as_utf8()); |
4151 // base_off: | 4265 // base_off: |
4152 // 8 - 32-bit VM | 4266 // 8 - 32-bit VM |
4153 // 12 - 64-bit VM, compressed klass | 4267 // 12 - 64-bit VM, compressed klass |
4154 // 16 - 64-bit VM, normal klass | 4268 // 16 - 64-bit VM, normal klass |
4155 if (base_off % BytesPerLong != 0) { | 4269 if (base_off % BytesPerLong != 0) { |
4156 assert(UseCompressedKlassPointers, ""); | 4270 assert(UseCompressedClassPointers, ""); |
4157 if (is_array) { | 4271 if (is_array) { |
4158 // Exclude length to copy by 8 bytes words. | 4272 // Exclude length to copy by 8 bytes words. |
4159 base_off += sizeof(int); | 4273 base_off += sizeof(int); |
4160 } else { | 4274 } else { |
4161 // Include klass to copy by 8 bytes words. | 4275 // Include klass to copy by 8 bytes words. |
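
The assert's rename from `UseCompressedKlassPointers` to `UseCompressedClassPointers` tracks the hs25 flag rename; the surrounding logic rounds the clone's copy region to 8-byte words. With a 12-byte header (64-bit VM, compressed class pointers) `base_off` is not long-aligned, so arrays bump the offset past the length word while instances widen the region to include the klass word instead. A sketch of the adjustment; the instance branch here assumes the copy start moves back to the klass offset, which the comment implies but the excerpt does not show:

    #include <cstdio>

    static const int BytesPerLong = 8;

    // base_off per the comment above: 8 (32-bit VM), 12 (64-bit VM with
    // compressed class pointers), 16 (64-bit VM, normal klass pointers).
    static int copy_start_offset(int base_off, bool is_array) {
      if (base_off % BytesPerLong != 0) {         // only the 12-byte case
        if (is_array) {
          base_off += (int)sizeof(int);  // exclude the length word -> 16
        } else {
          base_off -= (int)sizeof(int);  // include klass word -> 8 (assumed)
        }
      }
      return base_off;
    }

    int main() {
      std::printf("array copy starts at %d, instance copy at %d\n",
                  copy_start_offset(12, true), copy_start_offset(12, false));
      return 0;
    }
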