Mercurial > hg > graal-jvmci-8
comparison src/share/vm/opto/library_call.cpp @ 7194:beebba0acc11
7172640: C2: instrinsic implementations in LibraryCallKit should use argument() instead of pop()
Reviewed-by: kvn, jrose
author | twisti |
---|---|
date | Mon, 26 Nov 2012 17:25:11 -0800 |
parents | a3ecd773a7b9 |
children | dd38cfd12c3a |
comparison
equal
deleted
inserted
replaced
7193:ee32440febeb | 7194:beebba0acc11 |
---|---|
65 | 65 |
66 | 66 |
67 // Local helper class for LibraryIntrinsic: | 67 // Local helper class for LibraryIntrinsic: |
68 class LibraryCallKit : public GraphKit { | 68 class LibraryCallKit : public GraphKit { |
69 private: | 69 private: |
70 LibraryIntrinsic* _intrinsic; // the library intrinsic being called | 70 LibraryIntrinsic* _intrinsic; // the library intrinsic being called |
71 Node* _result; // the result node, if any | |
72 int _reexecute_sp; // the stack pointer when bytecode needs to be reexecuted | |
71 | 73 |
72 const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false); | 74 const TypeOopPtr* sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr = false); |
73 | 75 |
74 public: | 76 public: |
75 LibraryCallKit(JVMState* caller, LibraryIntrinsic* intrinsic) | 77 LibraryCallKit(JVMState* jvms, LibraryIntrinsic* intrinsic) |
76 : GraphKit(caller), | 78 : GraphKit(jvms), |
77 _intrinsic(intrinsic) | 79 _intrinsic(intrinsic), |
80 _result(NULL) | |
78 { | 81 { |
79 } | 82 // Find out how many arguments the interpreter needs when deoptimizing |
83 // and save the stack pointer value so it can used by uncommon_trap. | |
84 // We find the argument count by looking at the declared signature. | |
85 bool ignored_will_link; | |
86 ciSignature* declared_signature = NULL; | |
87 ciMethod* ignored_callee = caller()->get_method_at_bci(bci(), ignored_will_link, &declared_signature); | |
88 const int nargs = declared_signature->arg_size_for_bc(caller()->java_code_at_bci(bci())); | |
89 _reexecute_sp = sp() + nargs; // "push" arguments back on stack | |
90 } | |
91 | |
92 virtual LibraryCallKit* is_LibraryCallKit() const { return (LibraryCallKit*)this; } | |
80 | 93 |
81 ciMethod* caller() const { return jvms()->method(); } | 94 ciMethod* caller() const { return jvms()->method(); } |
82 int bci() const { return jvms()->bci(); } | 95 int bci() const { return jvms()->bci(); } |
83 LibraryIntrinsic* intrinsic() const { return _intrinsic; } | 96 LibraryIntrinsic* intrinsic() const { return _intrinsic; } |
84 vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); } | 97 vmIntrinsics::ID intrinsic_id() const { return _intrinsic->intrinsic_id(); } |
85 ciMethod* callee() const { return _intrinsic->method(); } | 98 ciMethod* callee() const { return _intrinsic->method(); } |
86 ciSignature* signature() const { return callee()->signature(); } | |
87 int arg_size() const { return callee()->arg_size(); } | |
88 | 99 |
89 bool try_to_inline(); | 100 bool try_to_inline(); |
90 Node* try_to_predicate(); | 101 Node* try_to_predicate(); |
91 | 102 |
103 void push_result() { | |
104 // Push the result onto the stack. | |
105 if (!stopped() && result() != NULL) { | |
106 BasicType bt = result()->bottom_type()->basic_type(); | |
107 push_node(bt, result()); | |
108 } | |
109 } | |
110 | |
111 private: | |
112 void fatal_unexpected_iid(vmIntrinsics::ID iid) { | |
113 fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid))); | |
114 } | |
115 | |
116 void set_result(Node* n) { assert(_result == NULL, "only set once"); _result = n; } | |
117 void set_result(RegionNode* region, PhiNode* value); | |
118 Node* result() { return _result; } | |
119 | |
120 virtual int reexecute_sp() { return _reexecute_sp; } | |
121 | |
92 // Helper functions to inline natives | 122 // Helper functions to inline natives |
93 void push_result(RegionNode* region, PhiNode* value); | |
94 Node* generate_guard(Node* test, RegionNode* region, float true_prob); | 123 Node* generate_guard(Node* test, RegionNode* region, float true_prob); |
95 Node* generate_slow_guard(Node* test, RegionNode* region); | 124 Node* generate_slow_guard(Node* test, RegionNode* region); |
96 Node* generate_fair_guard(Node* test, RegionNode* region); | 125 Node* generate_fair_guard(Node* test, RegionNode* region); |
97 Node* generate_negative_guard(Node* index, RegionNode* region, | 126 Node* generate_negative_guard(Node* index, RegionNode* region, |
98 // resulting CastII of index: | 127 // resulting CastII of index: |
106 Node* generate_current_thread(Node* &tls_output); | 135 Node* generate_current_thread(Node* &tls_output); |
107 address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset, | 136 address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset, |
108 bool disjoint_bases, const char* &name, bool dest_uninitialized); | 137 bool disjoint_bases, const char* &name, bool dest_uninitialized); |
109 Node* load_mirror_from_klass(Node* klass); | 138 Node* load_mirror_from_klass(Node* klass); |
110 Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null, | 139 Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null, |
111 int nargs, | |
112 RegionNode* region, int null_path, | 140 RegionNode* region, int null_path, |
113 int offset); | 141 int offset); |
114 Node* load_klass_from_mirror(Node* mirror, bool never_see_null, int nargs, | 142 Node* load_klass_from_mirror(Node* mirror, bool never_see_null, |
115 RegionNode* region, int null_path) { | 143 RegionNode* region, int null_path) { |
116 int offset = java_lang_Class::klass_offset_in_bytes(); | 144 int offset = java_lang_Class::klass_offset_in_bytes(); |
117 return load_klass_from_mirror_common(mirror, never_see_null, nargs, | 145 return load_klass_from_mirror_common(mirror, never_see_null, |
118 region, null_path, | 146 region, null_path, |
119 offset); | 147 offset); |
120 } | 148 } |
121 Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null, | 149 Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null, |
122 int nargs, | |
123 RegionNode* region, int null_path) { | 150 RegionNode* region, int null_path) { |
124 int offset = java_lang_Class::array_klass_offset_in_bytes(); | 151 int offset = java_lang_Class::array_klass_offset_in_bytes(); |
125 return load_klass_from_mirror_common(mirror, never_see_null, nargs, | 152 return load_klass_from_mirror_common(mirror, never_see_null, |
126 region, null_path, | 153 region, null_path, |
127 offset); | 154 offset); |
128 } | 155 } |
129 Node* generate_access_flags_guard(Node* kls, | 156 Node* generate_access_flags_guard(Node* kls, |
130 int modifier_mask, int modifier_bits, | 157 int modifier_mask, int modifier_bits, |
159 Node* make_string_method_node(int opcode, Node* str1, Node* str2); | 186 Node* make_string_method_node(int opcode, Node* str1, Node* str2); |
160 bool inline_string_compareTo(); | 187 bool inline_string_compareTo(); |
161 bool inline_string_indexOf(); | 188 bool inline_string_indexOf(); |
162 Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i); | 189 Node* string_indexOf(Node* string_object, ciTypeArray* target_array, jint offset, jint cache_i, jint md2_i); |
163 bool inline_string_equals(); | 190 bool inline_string_equals(); |
164 Node* pop_math_arg(); | 191 Node* round_double_node(Node* n); |
165 bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName); | 192 bool runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName); |
166 bool inline_math_native(vmIntrinsics::ID id); | 193 bool inline_math_native(vmIntrinsics::ID id); |
167 bool inline_trig(vmIntrinsics::ID id); | 194 bool inline_trig(vmIntrinsics::ID id); |
168 bool inline_trans(vmIntrinsics::ID id); | 195 bool inline_math(vmIntrinsics::ID id); |
169 bool inline_abs(vmIntrinsics::ID id); | 196 bool inline_exp(); |
170 bool inline_sqrt(vmIntrinsics::ID id); | 197 bool inline_pow(); |
171 void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName); | 198 void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName); |
172 bool inline_pow(vmIntrinsics::ID id); | |
173 bool inline_exp(vmIntrinsics::ID id); | |
174 bool inline_min_max(vmIntrinsics::ID id); | 199 bool inline_min_max(vmIntrinsics::ID id); |
175 Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y); | 200 Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y); |
176 // This returns Type::AnyPtr, RawPtr, or OopPtr. | 201 // This returns Type::AnyPtr, RawPtr, or OopPtr. |
177 int classify_unsafe_addr(Node* &base, Node* &offset); | 202 int classify_unsafe_addr(Node* &base, Node* &offset); |
178 Node* make_unsafe_address(Node* base, Node* offset); | 203 Node* make_unsafe_address(Node* base, Node* offset); |
179 // Helper for inline_unsafe_access. | 204 // Helper for inline_unsafe_access. |
180 // Generates the guards that check whether the result of | 205 // Generates the guards that check whether the result of |
181 // Unsafe.getObject should be recorded in an SATB log buffer. | 206 // Unsafe.getObject should be recorded in an SATB log buffer. |
182 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, int nargs, bool need_mem_bar); | 207 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar); |
183 bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile); | 208 bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile); |
184 bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static); | 209 bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static); |
185 bool inline_unsafe_allocate(); | 210 bool inline_unsafe_allocate(); |
186 bool inline_unsafe_copyMemory(); | 211 bool inline_unsafe_copyMemory(); |
187 bool inline_native_currentThread(); | 212 bool inline_native_currentThread(); |
251 Node* copy_length, bool dest_uninitialized); | 276 Node* copy_length, bool dest_uninitialized); |
252 typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind; | 277 typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind; |
253 bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind); | 278 bool inline_unsafe_load_store(BasicType type, LoadStoreKind kind); |
254 bool inline_unsafe_ordered_store(BasicType type); | 279 bool inline_unsafe_ordered_store(BasicType type); |
255 bool inline_fp_conversions(vmIntrinsics::ID id); | 280 bool inline_fp_conversions(vmIntrinsics::ID id); |
256 bool inline_numberOfLeadingZeros(vmIntrinsics::ID id); | 281 bool inline_number_methods(vmIntrinsics::ID id); |
257 bool inline_numberOfTrailingZeros(vmIntrinsics::ID id); | |
258 bool inline_bitCount(vmIntrinsics::ID id); | |
259 bool inline_reverseBytes(vmIntrinsics::ID id); | |
260 | |
261 bool inline_reference_get(); | 282 bool inline_reference_get(); |
262 bool inline_aescrypt_Block(vmIntrinsics::ID id); | 283 bool inline_aescrypt_Block(vmIntrinsics::ID id); |
263 bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id); | 284 bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id); |
264 Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting); | 285 Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting); |
265 Node* get_key_start_from_aescrypt_object(Node* aescrypt_object); | 286 Node* get_key_start_from_aescrypt_object(Node* aescrypt_object); |
319 bool is_predicted = false; | 340 bool is_predicted = false; |
320 | 341 |
321 switch (id) { | 342 switch (id) { |
322 case vmIntrinsics::_compareTo: | 343 case vmIntrinsics::_compareTo: |
323 if (!SpecialStringCompareTo) return NULL; | 344 if (!SpecialStringCompareTo) return NULL; |
345 if (!Matcher::match_rule_supported(Op_StrComp)) return NULL; | |
324 break; | 346 break; |
325 case vmIntrinsics::_indexOf: | 347 case vmIntrinsics::_indexOf: |
326 if (!SpecialStringIndexOf) return NULL; | 348 if (!SpecialStringIndexOf) return NULL; |
327 break; | 349 break; |
328 case vmIntrinsics::_equals: | 350 case vmIntrinsics::_equals: |
329 if (!SpecialStringEquals) return NULL; | 351 if (!SpecialStringEquals) return NULL; |
352 if (!Matcher::match_rule_supported(Op_StrEquals)) return NULL; | |
330 break; | 353 break; |
331 case vmIntrinsics::_equalsC: | 354 case vmIntrinsics::_equalsC: |
332 if (!SpecialArraysEquals) return NULL; | 355 if (!SpecialArraysEquals) return NULL; |
356 if (!Matcher::match_rule_supported(Op_AryEq)) return NULL; | |
333 break; | 357 break; |
334 case vmIntrinsics::_arraycopy: | 358 case vmIntrinsics::_arraycopy: |
335 if (!InlineArrayCopy) return NULL; | 359 if (!InlineArrayCopy) return NULL; |
336 break; | 360 break; |
337 case vmIntrinsics::_copyMemory: | 361 case vmIntrinsics::_copyMemory: |
378 if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL; | 402 if (!Matcher::match_rule_supported(Op_CountTrailingZerosI)) return NULL; |
379 break; | 403 break; |
380 | 404 |
381 case vmIntrinsics::_numberOfTrailingZeros_l: | 405 case vmIntrinsics::_numberOfTrailingZeros_l: |
382 if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL; | 406 if (!Matcher::match_rule_supported(Op_CountTrailingZerosL)) return NULL; |
407 break; | |
408 | |
409 case vmIntrinsics::_reverseBytes_c: | |
410 if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return false; | |
411 break; | |
412 case vmIntrinsics::_reverseBytes_s: | |
413 if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return false; | |
414 break; | |
415 case vmIntrinsics::_reverseBytes_i: | |
416 if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return false; | |
417 break; | |
418 case vmIntrinsics::_reverseBytes_l: | |
419 if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return false; | |
383 break; | 420 break; |
384 | 421 |
385 case vmIntrinsics::_Reference_get: | 422 case vmIntrinsics::_Reference_get: |
386 // Use the intrinsic version of Reference.get() so that the value in | 423 // Use the intrinsic version of Reference.get() so that the value in |
387 // the referent field can be registered by the G1 pre-barrier code. | 424 // the referent field can be registered by the G1 pre-barrier code. |
486 char buf[1000]; | 523 char buf[1000]; |
487 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); | 524 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); |
488 tty->print_cr("Intrinsic %s", str); | 525 tty->print_cr("Intrinsic %s", str); |
489 } | 526 } |
490 #endif | 527 #endif |
491 | 528 ciMethod* callee = kit.callee(); |
529 const int bci = kit.bci(); | |
530 | |
531 // Try to inline the intrinsic. | |
492 if (kit.try_to_inline()) { | 532 if (kit.try_to_inline()) { |
493 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { | 533 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { |
494 CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); | 534 CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); |
495 } | 535 } |
496 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); | 536 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); |
497 if (C->log()) { | 537 if (C->log()) { |
498 C->log()->elem("intrinsic id='%s'%s nodes='%d'", | 538 C->log()->elem("intrinsic id='%s'%s nodes='%d'", |
499 vmIntrinsics::name_at(intrinsic_id()), | 539 vmIntrinsics::name_at(intrinsic_id()), |
500 (is_virtual() ? " virtual='1'" : ""), | 540 (is_virtual() ? " virtual='1'" : ""), |
501 C->unique() - nodes); | 541 C->unique() - nodes); |
502 } | 542 } |
543 // Push the result from the inlined method onto the stack. | |
544 kit.push_result(); | |
503 return kit.transfer_exceptions_into_jvms(); | 545 return kit.transfer_exceptions_into_jvms(); |
504 } | 546 } |
505 | 547 |
506 // The intrinsic bailed out | 548 // The intrinsic bailed out |
507 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { | 549 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { |
508 if (jvms->has_method()) { | 550 if (jvms->has_method()) { |
509 // Not a root compile. | 551 // Not a root compile. |
510 const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)"; | 552 const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)"; |
511 CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), msg); | 553 CompileTask::print_inlining(callee, jvms->depth() - 1, bci, msg); |
512 } else { | 554 } else { |
513 // Root compile | 555 // Root compile |
514 tty->print("Did not generate intrinsic %s%s at bci:%d in", | 556 tty->print("Did not generate intrinsic %s%s at bci:%d in", |
515 vmIntrinsics::name_at(intrinsic_id()), | 557 vmIntrinsics::name_at(intrinsic_id()), |
516 (is_virtual() ? " (virtual)" : ""), kit.bci()); | 558 (is_virtual() ? " (virtual)" : ""), bci); |
517 } | 559 } |
518 } | 560 } |
519 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed); | 561 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed); |
520 return NULL; | 562 return NULL; |
521 } | 563 } |
530 char buf[1000]; | 572 char buf[1000]; |
531 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); | 573 const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf)); |
532 tty->print_cr("Predicate for intrinsic %s", str); | 574 tty->print_cr("Predicate for intrinsic %s", str); |
533 } | 575 } |
534 #endif | 576 #endif |
577 ciMethod* callee = kit.callee(); | |
578 const int bci = kit.bci(); | |
535 | 579 |
536 Node* slow_ctl = kit.try_to_predicate(); | 580 Node* slow_ctl = kit.try_to_predicate(); |
537 if (!kit.failing()) { | 581 if (!kit.failing()) { |
582 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { | |
583 CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)"); | |
584 } | |
585 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked); | |
538 if (C->log()) { | 586 if (C->log()) { |
539 C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'", | 587 C->log()->elem("predicate_intrinsic id='%s'%s nodes='%d'", |
540 vmIntrinsics::name_at(intrinsic_id()), | 588 vmIntrinsics::name_at(intrinsic_id()), |
541 (is_virtual() ? " virtual='1'" : ""), | 589 (is_virtual() ? " virtual='1'" : ""), |
542 C->unique() - nodes); | 590 C->unique() - nodes); |
547 // The intrinsic bailed out | 595 // The intrinsic bailed out |
548 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { | 596 if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) { |
549 if (jvms->has_method()) { | 597 if (jvms->has_method()) { |
550 // Not a root compile. | 598 // Not a root compile. |
551 const char* msg = "failed to generate predicate for intrinsic"; | 599 const char* msg = "failed to generate predicate for intrinsic"; |
552 CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, kit.bci(), msg); | 600 CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, bci, msg); |
553 } else { | 601 } else { |
554 // Root compile | 602 // Root compile |
555 tty->print("Did not generate predicate for intrinsic %s%s at bci:%d in", | 603 tty->print("Did not generate predicate for intrinsic %s%s at bci:%d in", |
556 vmIntrinsics::name_at(intrinsic_id()), | 604 vmIntrinsics::name_at(intrinsic_id()), |
557 (is_virtual() ? " (virtual)" : ""), kit.bci()); | 605 (is_virtual() ? " (virtual)" : ""), bci); |
558 } | 606 } |
559 } | 607 } |
560 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed); | 608 C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed); |
561 return NULL; | 609 return NULL; |
562 } | 610 } |
564 bool LibraryCallKit::try_to_inline() { | 612 bool LibraryCallKit::try_to_inline() { |
565 // Handle symbolic names for otherwise undistinguished boolean switches: | 613 // Handle symbolic names for otherwise undistinguished boolean switches: |
566 const bool is_store = true; | 614 const bool is_store = true; |
567 const bool is_native_ptr = true; | 615 const bool is_native_ptr = true; |
568 const bool is_static = true; | 616 const bool is_static = true; |
617 const bool is_volatile = true; | |
569 | 618 |
570 if (!jvms()->has_method()) { | 619 if (!jvms()->has_method()) { |
571 // Root JVMState has a null method. | 620 // Root JVMState has a null method. |
572 assert(map()->memory()->Opcode() == Op_Parm, ""); | 621 assert(map()->memory()->Opcode() == Op_Parm, ""); |
573 // Insert the memory aliasing node | 622 // Insert the memory aliasing node |
574 set_all_memory(reset_memory()); | 623 set_all_memory(reset_memory()); |
575 } | 624 } |
576 assert(merged_memory(), ""); | 625 assert(merged_memory(), ""); |
577 | 626 |
627 | |
578 switch (intrinsic_id()) { | 628 switch (intrinsic_id()) { |
579 case vmIntrinsics::_hashCode: | 629 case vmIntrinsics::_hashCode: return inline_native_hashcode(intrinsic()->is_virtual(), !is_static); |
580 return inline_native_hashcode(intrinsic()->is_virtual(), !is_static); | 630 case vmIntrinsics::_identityHashCode: return inline_native_hashcode(/*!virtual*/ false, is_static); |
581 case vmIntrinsics::_identityHashCode: | 631 case vmIntrinsics::_getClass: return inline_native_getClass(); |
582 return inline_native_hashcode(/*!virtual*/ false, is_static); | |
583 case vmIntrinsics::_getClass: | |
584 return inline_native_getClass(); | |
585 | 632 |
586 case vmIntrinsics::_dsin: | 633 case vmIntrinsics::_dsin: |
587 case vmIntrinsics::_dcos: | 634 case vmIntrinsics::_dcos: |
588 case vmIntrinsics::_dtan: | 635 case vmIntrinsics::_dtan: |
589 case vmIntrinsics::_dabs: | 636 case vmIntrinsics::_dabs: |
590 case vmIntrinsics::_datan2: | 637 case vmIntrinsics::_datan2: |
591 case vmIntrinsics::_dsqrt: | 638 case vmIntrinsics::_dsqrt: |
592 case vmIntrinsics::_dexp: | 639 case vmIntrinsics::_dexp: |
593 case vmIntrinsics::_dlog: | 640 case vmIntrinsics::_dlog: |
594 case vmIntrinsics::_dlog10: | 641 case vmIntrinsics::_dlog10: |
595 case vmIntrinsics::_dpow: | 642 case vmIntrinsics::_dpow: return inline_math_native(intrinsic_id()); |
596 return inline_math_native(intrinsic_id()); | |
597 | 643 |
598 case vmIntrinsics::_min: | 644 case vmIntrinsics::_min: |
599 case vmIntrinsics::_max: | 645 case vmIntrinsics::_max: return inline_min_max(intrinsic_id()); |
600 return inline_min_max(intrinsic_id()); | 646 |
601 | 647 case vmIntrinsics::_arraycopy: return inline_arraycopy(); |
602 case vmIntrinsics::_arraycopy: | 648 |
603 return inline_arraycopy(); | 649 case vmIntrinsics::_compareTo: return inline_string_compareTo(); |
604 | 650 case vmIntrinsics::_indexOf: return inline_string_indexOf(); |
605 case vmIntrinsics::_compareTo: | 651 case vmIntrinsics::_equals: return inline_string_equals(); |
606 return inline_string_compareTo(); | 652 |
607 case vmIntrinsics::_indexOf: | 653 case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile); |
608 return inline_string_indexOf(); | 654 case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile); |
609 case vmIntrinsics::_equals: | 655 case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile); |
610 return inline_string_equals(); | 656 case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile); |
611 | 657 case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile); |
612 case vmIntrinsics::_getObject: | 658 case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile); |
613 return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, false); | 659 case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile); |
614 case vmIntrinsics::_getBoolean: | 660 case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile); |
615 return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, false); | 661 case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, !is_volatile); |
616 case vmIntrinsics::_getByte: | 662 |
617 return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, false); | 663 case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile); |
618 case vmIntrinsics::_getShort: | 664 case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile); |
619 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, false); | 665 case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile); |
620 case vmIntrinsics::_getChar: | 666 case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile); |
621 return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, false); | 667 case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile); |
622 case vmIntrinsics::_getInt: | 668 case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile); |
623 return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, false); | 669 case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile); |
624 case vmIntrinsics::_getLong: | 670 case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile); |
625 return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, false); | 671 case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile); |
626 case vmIntrinsics::_getFloat: | 672 |
627 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, false); | 673 case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, !is_volatile); |
628 case vmIntrinsics::_getDouble: | 674 case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, !is_volatile); |
629 return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, false); | 675 case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, !is_volatile); |
630 | 676 case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, !is_volatile); |
631 case vmIntrinsics::_putObject: | 677 case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, !is_volatile); |
632 return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, false); | 678 case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, !is_volatile); |
633 case vmIntrinsics::_putBoolean: | 679 case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, !is_volatile); |
634 return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, false); | 680 case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile); |
635 case vmIntrinsics::_putByte: | 681 |
636 return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, false); | 682 case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, !is_volatile); |
637 case vmIntrinsics::_putShort: | 683 case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, !is_volatile); |
638 return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, false); | 684 case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, !is_volatile); |
639 case vmIntrinsics::_putChar: | 685 case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, !is_volatile); |
640 return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, false); | 686 case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, !is_volatile); |
641 case vmIntrinsics::_putInt: | 687 case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, !is_volatile); |
642 return inline_unsafe_access(!is_native_ptr, is_store, T_INT, false); | 688 case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, !is_volatile); |
643 case vmIntrinsics::_putLong: | 689 case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, !is_volatile); |
644 return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, false); | 690 |
645 case vmIntrinsics::_putFloat: | 691 case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, is_volatile); |
646 return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, false); | 692 case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, is_volatile); |
647 case vmIntrinsics::_putDouble: | 693 case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, is_volatile); |
648 return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, false); | 694 case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, is_volatile); |
649 | 695 case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, is_volatile); |
650 case vmIntrinsics::_getByte_raw: | 696 case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, is_volatile); |
651 return inline_unsafe_access(is_native_ptr, !is_store, T_BYTE, false); | 697 case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, is_volatile); |
652 case vmIntrinsics::_getShort_raw: | 698 case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, is_volatile); |
653 return inline_unsafe_access(is_native_ptr, !is_store, T_SHORT, false); | 699 case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, is_volatile); |
654 case vmIntrinsics::_getChar_raw: | 700 |
655 return inline_unsafe_access(is_native_ptr, !is_store, T_CHAR, false); | 701 case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, is_volatile); |
656 case vmIntrinsics::_getInt_raw: | 702 case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, is_volatile); |
657 return inline_unsafe_access(is_native_ptr, !is_store, T_INT, false); | 703 case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, is_volatile); |
658 case vmIntrinsics::_getLong_raw: | 704 case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, is_volatile); |
659 return inline_unsafe_access(is_native_ptr, !is_store, T_LONG, false); | 705 case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, is_volatile); |
660 case vmIntrinsics::_getFloat_raw: | 706 case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, is_volatile); |
661 return inline_unsafe_access(is_native_ptr, !is_store, T_FLOAT, false); | 707 case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, is_volatile); |
662 case vmIntrinsics::_getDouble_raw: | 708 case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, is_volatile); |
663 return inline_unsafe_access(is_native_ptr, !is_store, T_DOUBLE, false); | 709 case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, is_volatile); |
664 case vmIntrinsics::_getAddress_raw: | 710 |
665 return inline_unsafe_access(is_native_ptr, !is_store, T_ADDRESS, false); | 711 case vmIntrinsics::_prefetchRead: return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static); |
666 | 712 case vmIntrinsics::_prefetchWrite: return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static); |
667 case vmIntrinsics::_putByte_raw: | 713 case vmIntrinsics::_prefetchReadStatic: return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static); |
668 return inline_unsafe_access(is_native_ptr, is_store, T_BYTE, false); | 714 case vmIntrinsics::_prefetchWriteStatic: return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static); |
669 case vmIntrinsics::_putShort_raw: | 715 |
670 return inline_unsafe_access(is_native_ptr, is_store, T_SHORT, false); | 716 case vmIntrinsics::_compareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg); |
671 case vmIntrinsics::_putChar_raw: | 717 case vmIntrinsics::_compareAndSwapInt: return inline_unsafe_load_store(T_INT, LS_cmpxchg); |
672 return inline_unsafe_access(is_native_ptr, is_store, T_CHAR, false); | 718 case vmIntrinsics::_compareAndSwapLong: return inline_unsafe_load_store(T_LONG, LS_cmpxchg); |
673 case vmIntrinsics::_putInt_raw: | 719 |
674 return inline_unsafe_access(is_native_ptr, is_store, T_INT, false); | 720 case vmIntrinsics::_putOrderedObject: return inline_unsafe_ordered_store(T_OBJECT); |
675 case vmIntrinsics::_putLong_raw: | 721 case vmIntrinsics::_putOrderedInt: return inline_unsafe_ordered_store(T_INT); |
676 return inline_unsafe_access(is_native_ptr, is_store, T_LONG, false); | 722 case vmIntrinsics::_putOrderedLong: return inline_unsafe_ordered_store(T_LONG); |
677 case vmIntrinsics::_putFloat_raw: | 723 |
678 return inline_unsafe_access(is_native_ptr, is_store, T_FLOAT, false); | 724 case vmIntrinsics::_getAndAddInt: return inline_unsafe_load_store(T_INT, LS_xadd); |
679 case vmIntrinsics::_putDouble_raw: | 725 case vmIntrinsics::_getAndAddLong: return inline_unsafe_load_store(T_LONG, LS_xadd); |
680 return inline_unsafe_access(is_native_ptr, is_store, T_DOUBLE, false); | 726 case vmIntrinsics::_getAndSetInt: return inline_unsafe_load_store(T_INT, LS_xchg); |
681 case vmIntrinsics::_putAddress_raw: | 727 case vmIntrinsics::_getAndSetLong: return inline_unsafe_load_store(T_LONG, LS_xchg); |
682 return inline_unsafe_access(is_native_ptr, is_store, T_ADDRESS, false); | 728 case vmIntrinsics::_getAndSetObject: return inline_unsafe_load_store(T_OBJECT, LS_xchg); |
683 | 729 |
684 case vmIntrinsics::_getObjectVolatile: | 730 case vmIntrinsics::_currentThread: return inline_native_currentThread(); |
685 return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, true); | 731 case vmIntrinsics::_isInterrupted: return inline_native_isInterrupted(); |
686 case vmIntrinsics::_getBooleanVolatile: | |
687 return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, true); | |
688 case vmIntrinsics::_getByteVolatile: | |
689 return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, true); | |
690 case vmIntrinsics::_getShortVolatile: | |
691 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, true); | |
692 case vmIntrinsics::_getCharVolatile: | |
693 return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, true); | |
694 case vmIntrinsics::_getIntVolatile: | |
695 return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, true); | |
696 case vmIntrinsics::_getLongVolatile: | |
697 return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, true); | |
698 case vmIntrinsics::_getFloatVolatile: | |
699 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, true); | |
700 case vmIntrinsics::_getDoubleVolatile: | |
701 return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, true); | |
702 | |
703 case vmIntrinsics::_putObjectVolatile: | |
704 return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, true); | |
705 case vmIntrinsics::_putBooleanVolatile: | |
706 return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, true); | |
707 case vmIntrinsics::_putByteVolatile: | |
708 return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, true); | |
709 case vmIntrinsics::_putShortVolatile: | |
710 return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, true); | |
711 case vmIntrinsics::_putCharVolatile: | |
712 return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, true); | |
713 case vmIntrinsics::_putIntVolatile: | |
714 return inline_unsafe_access(!is_native_ptr, is_store, T_INT, true); | |
715 case vmIntrinsics::_putLongVolatile: | |
716 return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, true); | |
717 case vmIntrinsics::_putFloatVolatile: | |
718 return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, true); | |
719 case vmIntrinsics::_putDoubleVolatile: | |
720 return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, true); | |
721 | |
722 case vmIntrinsics::_prefetchRead: | |
723 return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static); | |
724 case vmIntrinsics::_prefetchWrite: | |
725 return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static); | |
726 case vmIntrinsics::_prefetchReadStatic: | |
727 return inline_unsafe_prefetch(!is_native_ptr, !is_store, is_static); | |
728 case vmIntrinsics::_prefetchWriteStatic: | |
729 return inline_unsafe_prefetch(!is_native_ptr, is_store, is_static); | |
730 | |
731 case vmIntrinsics::_compareAndSwapObject: | |
732 return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg); | |
733 case vmIntrinsics::_compareAndSwapInt: | |
734 return inline_unsafe_load_store(T_INT, LS_cmpxchg); | |
735 case vmIntrinsics::_compareAndSwapLong: | |
736 return inline_unsafe_load_store(T_LONG, LS_cmpxchg); | |
737 | |
738 case vmIntrinsics::_putOrderedObject: | |
739 return inline_unsafe_ordered_store(T_OBJECT); | |
740 case vmIntrinsics::_putOrderedInt: | |
741 return inline_unsafe_ordered_store(T_INT); | |
742 case vmIntrinsics::_putOrderedLong: | |
743 return inline_unsafe_ordered_store(T_LONG); | |
744 | |
745 case vmIntrinsics::_getAndAddInt: | |
746 return inline_unsafe_load_store(T_INT, LS_xadd); | |
747 case vmIntrinsics::_getAndAddLong: | |
748 return inline_unsafe_load_store(T_LONG, LS_xadd); | |
749 case vmIntrinsics::_getAndSetInt: | |
750 return inline_unsafe_load_store(T_INT, LS_xchg); | |
751 case vmIntrinsics::_getAndSetLong: | |
752 return inline_unsafe_load_store(T_LONG, LS_xchg); | |
753 case vmIntrinsics::_getAndSetObject: | |
754 return inline_unsafe_load_store(T_OBJECT, LS_xchg); | |
755 | |
756 case vmIntrinsics::_currentThread: | |
757 return inline_native_currentThread(); | |
758 case vmIntrinsics::_isInterrupted: | |
759 return inline_native_isInterrupted(); | |
760 | 732 |
761 #ifdef TRACE_HAVE_INTRINSICS | 733 #ifdef TRACE_HAVE_INTRINSICS |
762 case vmIntrinsics::_classID: | 734 case vmIntrinsics::_classID: return inline_native_classID(); |
763 return inline_native_classID(); | 735 case vmIntrinsics::_threadID: return inline_native_threadID(); |
764 case vmIntrinsics::_threadID: | 736 case vmIntrinsics::_counterTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime"); |
765 return inline_native_threadID(); | |
766 case vmIntrinsics::_counterTime: | |
767 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), "counterTime"); | |
768 #endif | 737 #endif |
769 case vmIntrinsics::_currentTimeMillis: | 738 case vmIntrinsics::_currentTimeMillis: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis"); |
770 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeMillis), "currentTimeMillis"); | 739 case vmIntrinsics::_nanoTime: return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime"); |
771 case vmIntrinsics::_nanoTime: | 740 case vmIntrinsics::_allocateInstance: return inline_unsafe_allocate(); |
772 return inline_native_time_funcs(CAST_FROM_FN_PTR(address, os::javaTimeNanos), "nanoTime"); | 741 case vmIntrinsics::_copyMemory: return inline_unsafe_copyMemory(); |
773 case vmIntrinsics::_allocateInstance: | 742 case vmIntrinsics::_newArray: return inline_native_newArray(); |
774 return inline_unsafe_allocate(); | 743 case vmIntrinsics::_getLength: return inline_native_getLength(); |
775 case vmIntrinsics::_copyMemory: | 744 case vmIntrinsics::_copyOf: return inline_array_copyOf(false); |
776 return inline_unsafe_copyMemory(); | 745 case vmIntrinsics::_copyOfRange: return inline_array_copyOf(true); |
777 case vmIntrinsics::_newArray: | 746 case vmIntrinsics::_equalsC: return inline_array_equals(); |
778 return inline_native_newArray(); | 747 case vmIntrinsics::_clone: return inline_native_clone(intrinsic()->is_virtual()); |
779 case vmIntrinsics::_getLength: | 748 |
780 return inline_native_getLength(); | 749 case vmIntrinsics::_isAssignableFrom: return inline_native_subtype_check(); |
781 case vmIntrinsics::_copyOf: | |
782 return inline_array_copyOf(false); | |
783 case vmIntrinsics::_copyOfRange: | |
784 return inline_array_copyOf(true); | |
785 case vmIntrinsics::_equalsC: | |
786 return inline_array_equals(); | |
787 case vmIntrinsics::_clone: | |
788 return inline_native_clone(intrinsic()->is_virtual()); | |
789 | |
790 case vmIntrinsics::_isAssignableFrom: | |
791 return inline_native_subtype_check(); | |
792 | 750 |
793 case vmIntrinsics::_isInstance: | 751 case vmIntrinsics::_isInstance: |
794 case vmIntrinsics::_getModifiers: | 752 case vmIntrinsics::_getModifiers: |
795 case vmIntrinsics::_isInterface: | 753 case vmIntrinsics::_isInterface: |
796 case vmIntrinsics::_isArray: | 754 case vmIntrinsics::_isArray: |
797 case vmIntrinsics::_isPrimitive: | 755 case vmIntrinsics::_isPrimitive: |
798 case vmIntrinsics::_getSuperclass: | 756 case vmIntrinsics::_getSuperclass: |
799 case vmIntrinsics::_getComponentType: | 757 case vmIntrinsics::_getComponentType: |
800 case vmIntrinsics::_getClassAccessFlags: | 758 case vmIntrinsics::_getClassAccessFlags: return inline_native_Class_query(intrinsic_id()); |
801 return inline_native_Class_query(intrinsic_id()); | |
802 | 759 |
803 case vmIntrinsics::_floatToRawIntBits: | 760 case vmIntrinsics::_floatToRawIntBits: |
804 case vmIntrinsics::_floatToIntBits: | 761 case vmIntrinsics::_floatToIntBits: |
805 case vmIntrinsics::_intBitsToFloat: | 762 case vmIntrinsics::_intBitsToFloat: |
806 case vmIntrinsics::_doubleToRawLongBits: | 763 case vmIntrinsics::_doubleToRawLongBits: |
807 case vmIntrinsics::_doubleToLongBits: | 764 case vmIntrinsics::_doubleToLongBits: |
808 case vmIntrinsics::_longBitsToDouble: | 765 case vmIntrinsics::_longBitsToDouble: return inline_fp_conversions(intrinsic_id()); |
809 return inline_fp_conversions(intrinsic_id()); | |
810 | 766 |
811 case vmIntrinsics::_numberOfLeadingZeros_i: | 767 case vmIntrinsics::_numberOfLeadingZeros_i: |
812 case vmIntrinsics::_numberOfLeadingZeros_l: | 768 case vmIntrinsics::_numberOfLeadingZeros_l: |
813 return inline_numberOfLeadingZeros(intrinsic_id()); | |
814 | |
815 case vmIntrinsics::_numberOfTrailingZeros_i: | 769 case vmIntrinsics::_numberOfTrailingZeros_i: |
816 case vmIntrinsics::_numberOfTrailingZeros_l: | 770 case vmIntrinsics::_numberOfTrailingZeros_l: |
817 return inline_numberOfTrailingZeros(intrinsic_id()); | |
818 | |
819 case vmIntrinsics::_bitCount_i: | 771 case vmIntrinsics::_bitCount_i: |
820 case vmIntrinsics::_bitCount_l: | 772 case vmIntrinsics::_bitCount_l: |
821 return inline_bitCount(intrinsic_id()); | |
822 | |
823 case vmIntrinsics::_reverseBytes_i: | 773 case vmIntrinsics::_reverseBytes_i: |
824 case vmIntrinsics::_reverseBytes_l: | 774 case vmIntrinsics::_reverseBytes_l: |
825 case vmIntrinsics::_reverseBytes_s: | 775 case vmIntrinsics::_reverseBytes_s: |
826 case vmIntrinsics::_reverseBytes_c: | 776 case vmIntrinsics::_reverseBytes_c: return inline_number_methods(intrinsic_id()); |
827 return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id()); | 777 |
828 | 778 case vmIntrinsics::_getCallerClass: return inline_native_Reflection_getCallerClass(); |
829 case vmIntrinsics::_getCallerClass: | 779 |
830 return inline_native_Reflection_getCallerClass(); | 780 case vmIntrinsics::_Reference_get: return inline_reference_get(); |
831 | |
832 case vmIntrinsics::_Reference_get: | |
833 return inline_reference_get(); | |
834 | 781 |
835 case vmIntrinsics::_aescrypt_encryptBlock: | 782 case vmIntrinsics::_aescrypt_encryptBlock: |
836 case vmIntrinsics::_aescrypt_decryptBlock: | 783 case vmIntrinsics::_aescrypt_decryptBlock: return inline_aescrypt_Block(intrinsic_id()); |
837 return inline_aescrypt_Block(intrinsic_id()); | |
838 | 784 |
839 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt: | 785 case vmIntrinsics::_cipherBlockChaining_encryptAESCrypt: |
840 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt: | 786 case vmIntrinsics::_cipherBlockChaining_decryptAESCrypt: |
841 return inline_cipherBlockChaining_AESCrypt(intrinsic_id()); | 787 return inline_cipherBlockChaining_AESCrypt(intrinsic_id()); |
842 | 788 |
881 set_control(top()); // No fast path instrinsic | 827 set_control(top()); // No fast path instrinsic |
882 return slow_ctl; | 828 return slow_ctl; |
883 } | 829 } |
884 } | 830 } |
885 | 831 |
886 //------------------------------push_result------------------------------ | 832 //------------------------------set_result------------------------------- |
887 // Helper function for finishing intrinsics. | 833 // Helper function for finishing intrinsics. |
888 void LibraryCallKit::push_result(RegionNode* region, PhiNode* value) { | 834 void LibraryCallKit::set_result(RegionNode* region, PhiNode* value) { |
889 record_for_igvn(region); | 835 record_for_igvn(region); |
890 set_control(_gvn.transform(region)); | 836 set_control(_gvn.transform(region)); |
891 BasicType value_type = value->type()->basic_type(); | 837 set_result( _gvn.transform(value)); |
892 push_node(value_type, _gvn.transform(value)); | 838 assert(value->type()->basic_type() == result()->bottom_type()->basic_type(), "sanity"); |
893 } | 839 } |
894 | 840 |
895 //------------------------------generate_guard--------------------------- | 841 //------------------------------generate_guard--------------------------- |
896 // Helper function for generating guarded fast-slow graph structures. | 842 // Helper function for generating guarded fast-slow graph structures. |
897 // The given 'test', if true, guards a slow path. If the test fails | 843 // The given 'test', if true, guards a slow path. If the test fails |
1076 // Helper method for String intrinsic functions. This version is called | 1022 // Helper method for String intrinsic functions. This version is called |
1077 // with str1 and str2 pointing to char[] nodes, with cnt1 and cnt2 pointing | 1023 // with str1 and str2 pointing to char[] nodes, with cnt1 and cnt2 pointing |
1078 // to Int nodes containing the lenghts of str1 and str2. | 1024 // to Int nodes containing the lenghts of str1 and str2. |
1079 // | 1025 // |
1080 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) { | 1026 Node* LibraryCallKit::make_string_method_node(int opcode, Node* str1_start, Node* cnt1, Node* str2_start, Node* cnt2) { |
1081 | |
1082 Node* result = NULL; | 1027 Node* result = NULL; |
1083 switch (opcode) { | 1028 switch (opcode) { |
1084 case Op_StrIndexOf: | 1029 case Op_StrIndexOf: |
1085 result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS), | 1030 result = new (C) StrIndexOfNode(control(), memory(TypeAryPtr::CHARS), |
1086 str1_start, cnt1, str2_start, cnt2); | 1031 str1_start, cnt1, str2_start, cnt2); |
1103 | 1048 |
1104 return _gvn.transform(result); | 1049 return _gvn.transform(result); |
1105 } | 1050 } |
1106 | 1051 |
1107 //------------------------------inline_string_compareTo------------------------ | 1052 //------------------------------inline_string_compareTo------------------------ |
1053 // public int java.lang.String.compareTo(String anotherString); | |
1108 bool LibraryCallKit::inline_string_compareTo() { | 1054 bool LibraryCallKit::inline_string_compareTo() { |
1109 | 1055 Node* receiver = null_check(argument(0)); |
1110 if (!Matcher::has_match_rule(Op_StrComp)) return false; | 1056 Node* arg = null_check(argument(1)); |
1111 | |
1112 _sp += 2; | |
1113 Node *argument = pop(); // pop non-receiver first: it was pushed second | |
1114 Node *receiver = pop(); | |
1115 | |
1116 // Null check on self without removing any arguments. The argument | |
1117 // null check technically happens in the wrong place, which can lead to | |
1118 // invalid stack traces when string compare is inlined into a method | |
1119 // which handles NullPointerExceptions. | |
1120 _sp += 2; | |
1121 receiver = do_null_check(receiver, T_OBJECT); | |
1122 argument = do_null_check(argument, T_OBJECT); | |
1123 _sp -= 2; | |
1124 if (stopped()) { | 1057 if (stopped()) { |
1125 return true; | 1058 return true; |
1126 } | 1059 } |
1127 | 1060 set_result(make_string_method_node(Op_StrComp, receiver, arg)); |
1128 Node* compare = make_string_method_node(Op_StrComp, receiver, argument); | |
1129 push(compare); | |
1130 return true; | 1061 return true; |
1131 } | 1062 } |
1132 | 1063 |
1133 //------------------------------inline_string_equals------------------------ | 1064 //------------------------------inline_string_equals------------------------ |
1134 bool LibraryCallKit::inline_string_equals() { | 1065 bool LibraryCallKit::inline_string_equals() { |
1135 | 1066 Node* receiver = null_check_receiver(); |
1136 if (!Matcher::has_match_rule(Op_StrEquals)) return false; | 1067 // NOTE: Do not null check argument for String.equals() because spec |
1137 | 1068 // allows to specify NULL as argument. |
1138 int nargs = 2; | 1069 Node* argument = this->argument(1); |
1139 _sp += nargs; | |
1140 Node* argument = pop(); // pop non-receiver first: it was pushed second | |
1141 Node* receiver = pop(); | |
1142 | |
1143 // Null check on self without removing any arguments. The argument | |
1144 // null check technically happens in the wrong place, which can lead to | |
1145 // invalid stack traces when string compare is inlined into a method | |
1146 // which handles NullPointerExceptions. | |
1147 _sp += nargs; | |
1148 receiver = do_null_check(receiver, T_OBJECT); | |
1149 //should not do null check for argument for String.equals(), because spec | |
1150 //allows to specify NULL as argument. | |
1151 _sp -= nargs; | |
1152 | |
1153 if (stopped()) { | 1070 if (stopped()) { |
1154 return true; | 1071 return true; |
1155 } | 1072 } |
1156 | 1073 |
1157 // paths (plus control) merge | 1074 // paths (plus control) merge |
1171 | 1088 |
1172 // get String klass for instanceOf | 1089 // get String klass for instanceOf |
1173 ciInstanceKlass* klass = env()->String_klass(); | 1090 ciInstanceKlass* klass = env()->String_klass(); |
1174 | 1091 |
1175 if (!stopped()) { | 1092 if (!stopped()) { |
1176 _sp += nargs; // gen_instanceof might do an uncommon trap | |
1177 Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass))); | 1093 Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass))); |
1178 _sp -= nargs; | |
1179 Node* cmp = _gvn.transform(new (C) CmpINode(inst, intcon(1))); | 1094 Node* cmp = _gvn.transform(new (C) CmpINode(inst, intcon(1))); |
1180 Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne)); | 1095 Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne)); |
1181 | 1096 |
1182 Node* inst_false = generate_guard(bol, NULL, PROB_MIN); | 1097 Node* inst_false = generate_guard(bol, NULL, PROB_MIN); |
1183 //instanceOf == true, fallthrough | 1098 //instanceOf == true, fallthrough |
1205 | 1120 |
1206 // Get length of receiver | 1121 // Get length of receiver |
1207 Node* receiver_cnt = load_String_length(no_ctrl, receiver); | 1122 Node* receiver_cnt = load_String_length(no_ctrl, receiver); |
1208 | 1123 |
1209 // Get start addr of argument | 1124 // Get start addr of argument |
1210 Node* argument_val = load_String_value(no_ctrl, argument); | 1125 Node* argument_val = load_String_value(no_ctrl, argument); |
1211 Node* argument_offset = load_String_offset(no_ctrl, argument); | 1126 Node* argument_offset = load_String_offset(no_ctrl, argument); |
1212 Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR); | 1127 Node* argument_start = array_element_address(argument_val, argument_offset, T_CHAR); |
1213 | 1128 |
1214 // Get length of argument | 1129 // Get length of argument |
1215 Node* argument_cnt = load_String_length(no_ctrl, argument); | 1130 Node* argument_cnt = load_String_length(no_ctrl, argument); |
1234 | 1149 |
1235 // post merge | 1150 // post merge |
1236 set_control(_gvn.transform(region)); | 1151 set_control(_gvn.transform(region)); |
1237 record_for_igvn(region); | 1152 record_for_igvn(region); |
1238 | 1153 |
1239 push(_gvn.transform(phi)); | 1154 set_result(_gvn.transform(phi)); |
1240 | |
1241 return true; | 1155 return true; |
1242 } | 1156 } |
1243 | 1157 |
1244 //------------------------------inline_array_equals---------------------------- | 1158 //------------------------------inline_array_equals---------------------------- |
1245 bool LibraryCallKit::inline_array_equals() { | 1159 bool LibraryCallKit::inline_array_equals() { |
1246 | 1160 Node* arg1 = argument(0); |
1247 if (!Matcher::has_match_rule(Op_AryEq)) return false; | 1161 Node* arg2 = argument(1); |
1248 | 1162 set_result(_gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), arg1, arg2))); |
1249 _sp += 2; | |
1250 Node *argument2 = pop(); | |
1251 Node *argument1 = pop(); | |
1252 | |
1253 Node* equals = | |
1254 _gvn.transform(new (C) AryEqNode(control(), memory(TypeAryPtr::CHARS), | |
1255 argument1, argument2) ); | |
1256 push(equals); | |
1257 return true; | 1163 return true; |
1258 } | 1164 } |
1259 | 1165 |
1260 // Java version of String.indexOf(constant string) | 1166 // Java version of String.indexOf(constant string) |
1261 // class StringDecl { | 1167 // class StringDecl { |
1323 | 1229 |
1324 Node* no_ctrl = NULL; | 1230 Node* no_ctrl = NULL; |
1325 float likely = PROB_LIKELY(0.9); | 1231 float likely = PROB_LIKELY(0.9); |
1326 float unlikely = PROB_UNLIKELY(0.9); | 1232 float unlikely = PROB_UNLIKELY(0.9); |
1327 | 1233 |
1328 const int nargs = 2; // number of arguments to push back for uncommon trap in predicate | 1234 const int nargs = 0; // no arguments to push back for uncommon trap in predicate |
1329 | 1235 |
1330 Node* source = load_String_value(no_ctrl, string_object); | 1236 Node* source = load_String_value(no_ctrl, string_object); |
1331 Node* sourceOffset = load_String_offset(no_ctrl, string_object); | 1237 Node* sourceOffset = load_String_offset(no_ctrl, string_object); |
1332 Node* sourceCount = load_String_length(no_ctrl, string_object); | 1238 Node* sourceCount = load_String_length(no_ctrl, string_object); |
1333 | 1239 |
1394 return result; | 1300 return result; |
1395 } | 1301 } |
1396 | 1302 |
1397 //------------------------------inline_string_indexOf------------------------ | 1303 //------------------------------inline_string_indexOf------------------------ |
1398 bool LibraryCallKit::inline_string_indexOf() { | 1304 bool LibraryCallKit::inline_string_indexOf() { |
1399 | 1305 Node* receiver = argument(0); |
1400 _sp += 2; | 1306 Node* arg = argument(1); |
1401 Node *argument = pop(); // pop non-receiver first: it was pushed second | |
1402 Node *receiver = pop(); | |
1403 | 1307 |
1404 Node* result; | 1308 Node* result; |
1405 // Disable the use of pcmpestri until it can be guaranteed that | 1309 // Disable the use of pcmpestri until it can be guaranteed that |
1406 // the load doesn't cross into the uncommited space. | 1310 // the load doesn't cross into the uncommited space. |
1407 if (Matcher::has_match_rule(Op_StrIndexOf) && | 1311 if (Matcher::has_match_rule(Op_StrIndexOf) && |
1408 UseSSE42Intrinsics) { | 1312 UseSSE42Intrinsics) { |
1409 // Generate SSE4.2 version of indexOf | 1313 // Generate SSE4.2 version of indexOf |
1410 // We currently only have match rules that use SSE4.2 | 1314 // We currently only have match rules that use SSE4.2 |
1411 | 1315 |
1412 // Null check on self without removing any arguments. The argument | 1316 receiver = null_check(receiver); |
1413 // null check technically happens in the wrong place, which can lead to | 1317 arg = null_check(arg); |
1414 // invalid stack traces when string compare is inlined into a method | |
1415 // which handles NullPointerExceptions. | |
1416 _sp += 2; | |
1417 receiver = do_null_check(receiver, T_OBJECT); | |
1418 argument = do_null_check(argument, T_OBJECT); | |
1419 _sp -= 2; | |
1420 | |
1421 if (stopped()) { | 1318 if (stopped()) { |
1422 return true; | 1319 return true; |
1423 } | 1320 } |
1424 | 1321 |
1425 ciInstanceKlass* str_klass = env()->String_klass(); | 1322 ciInstanceKlass* str_klass = env()->String_klass(); |
1437 | 1334 |
1438 // Get length of source string | 1335 // Get length of source string |
1439 Node* source_cnt = load_String_length(no_ctrl, receiver); | 1336 Node* source_cnt = load_String_length(no_ctrl, receiver); |
1440 | 1337 |
1441 // Get start addr of substring | 1338 // Get start addr of substring |
1442 Node* substr = load_String_value(no_ctrl, argument); | 1339 Node* substr = load_String_value(no_ctrl, arg); |
1443 Node* substr_offset = load_String_offset(no_ctrl, argument); | 1340 Node* substr_offset = load_String_offset(no_ctrl, arg); |
1444 Node* substr_start = array_element_address(substr, substr_offset, T_CHAR); | 1341 Node* substr_start = array_element_address(substr, substr_offset, T_CHAR); |
1445 | 1342 |
1446 // Get length of source string | 1343 // Get length of source string |
1447 Node* substr_cnt = load_String_length(no_ctrl, argument); | 1344 Node* substr_cnt = load_String_length(no_ctrl, arg); |
1448 | 1345 |
1449 // Check for substr count > string count | 1346 // Check for substr count > string count |
1450 Node* cmp = _gvn.transform( new(C) CmpINode(substr_cnt, source_cnt) ); | 1347 Node* cmp = _gvn.transform( new(C) CmpINode(substr_cnt, source_cnt) ); |
1451 Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::gt) ); | 1348 Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::gt) ); |
1452 Node* if_gt = generate_slow_guard(bol, NULL); | 1349 Node* if_gt = generate_slow_guard(bol, NULL); |
1475 record_for_igvn(result_rgn); | 1372 record_for_igvn(result_rgn); |
1476 result = _gvn.transform(result_phi); | 1373 result = _gvn.transform(result_phi); |
1477 | 1374 |
1478 } else { // Use LibraryCallKit::string_indexOf | 1375 } else { // Use LibraryCallKit::string_indexOf |
1479 // don't intrinsify if argument isn't a constant string. | 1376 // don't intrinsify if argument isn't a constant string. |
1480 if (!argument->is_Con()) { | 1377 if (!arg->is_Con()) { |
1481 return false; | 1378 return false; |
1482 } | 1379 } |
1483 const TypeOopPtr* str_type = _gvn.type(argument)->isa_oopptr(); | 1380 const TypeOopPtr* str_type = _gvn.type(arg)->isa_oopptr(); |
1484 if (str_type == NULL) { | 1381 if (str_type == NULL) { |
1485 return false; | 1382 return false; |
1486 } | 1383 } |
1487 ciInstanceKlass* klass = env()->String_klass(); | 1384 ciInstanceKlass* klass = env()->String_klass(); |
1488 ciObject* str_const = str_type->const_oop(); | 1385 ciObject* str_const = str_type->const_oop(); |
1509 // simplifies the resulting code somewhat so lets optimize for that. | 1406 // simplifies the resulting code somewhat so lets optimize for that. |
1510 if (o != 0 || c != pat->length()) { | 1407 if (o != 0 || c != pat->length()) { |
1511 return false; | 1408 return false; |
1512 } | 1409 } |
1513 | 1410 |
1514 // Null check on self without removing any arguments. The argument | 1411 receiver = null_check(receiver, T_OBJECT); |
1515 // null check technically happens in the wrong place, which can lead to | 1412 // NOTE: No null check on the argument is needed since it's a constant String oop. |
1516 // invalid stack traces when string compare is inlined into a method | |
1517 // which handles NullPointerExceptions. | |
1518 _sp += 2; | |
1519 receiver = do_null_check(receiver, T_OBJECT); | |
1520 // No null check on the argument is needed since it's a constant String oop. | |
1521 _sp -= 2; | |
1522 if (stopped()) { | 1413 if (stopped()) { |
1523 return true; | 1414 return true; |
1524 } | 1415 } |
1525 | 1416 |
1526 // The null string as a pattern always returns 0 (match at beginning of string) | 1417 // The null string as a pattern always returns 0 (match at beginning of string) |
1527 if (c == 0) { | 1418 if (c == 0) { |
1528 push(intcon(0)); | 1419 set_result(intcon(0)); |
1529 return true; | 1420 return true; |
1530 } | 1421 } |
1531 | 1422 |
1532 // Generate default indexOf | 1423 // Generate default indexOf |
1533 jchar lastChar = pat->char_at(o + (c - 1)); | 1424 jchar lastChar = pat->char_at(o + (c - 1)); |
1546 } | 1437 } |
1547 } | 1438 } |
1548 | 1439 |
1549 result = string_indexOf(receiver, pat, o, cache, md2); | 1440 result = string_indexOf(receiver, pat, o, cache, md2); |
1550 } | 1441 } |
1551 | 1442 set_result(result); |
1552 push(result); | |
1553 return true; | 1443 return true; |
1554 } | 1444 } |
1555 | 1445 |
1556 //--------------------------pop_math_arg-------------------------------- | 1446 //--------------------------round_double_node-------------------------------- |
1557 // Pop a double argument to a math function from the stack | 1447 // Round a double node if necessary. |
1558 // rounding it if necessary. | 1448 Node* LibraryCallKit::round_double_node(Node* n) { |
1559 Node * LibraryCallKit::pop_math_arg() { | 1449 if (Matcher::strict_fp_requires_explicit_rounding && UseSSE <= 1) |
1560 Node *arg = pop_pair(); | 1450 n = _gvn.transform(new (C) RoundDoubleNode(0, n)); |
1561 if( Matcher::strict_fp_requires_explicit_rounding && UseSSE<=1 ) | 1451 return n; |
1562 arg = _gvn.transform( new (C) RoundDoubleNode(0, arg) ); | 1452 } |
1563 return arg; | 1453 |
1454 //------------------------------inline_math----------------------------------- | |
1455 // public static double Math.abs(double) | |
1456 // public static double Math.sqrt(double) | |
1457 // public static double Math.log(double) | |
1458 // public static double Math.log10(double) | |
1459 bool LibraryCallKit::inline_math(vmIntrinsics::ID id) { | |
1460 Node* arg = round_double_node(argument(0)); | |
1461 Node* n; | |
1462 switch (id) { | |
1463 case vmIntrinsics::_dabs: n = new (C) AbsDNode( arg); break; | |
1464 case vmIntrinsics::_dsqrt: n = new (C) SqrtDNode(0, arg); break; | |
1465 case vmIntrinsics::_dlog: n = new (C) LogDNode( arg); break; | |
1466 case vmIntrinsics::_dlog10: n = new (C) Log10DNode( arg); break; | |
1467 default: fatal_unexpected_iid(id); break; | |
1468 } | |
1469 set_result(_gvn.transform(n)); | |
1470 return true; | |
1564 } | 1471 } |
1565 | 1472 |
1566 //------------------------------inline_trig---------------------------------- | 1473 //------------------------------inline_trig---------------------------------- |
1567 // Inline sin/cos/tan instructions, if possible. If rounding is required, do | 1474 // Inline sin/cos/tan instructions, if possible. If rounding is required, do |
1568 // argument reduction which will turn into a fast/slow diamond. | 1475 // argument reduction which will turn into a fast/slow diamond. |
1569 bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) { | 1476 bool LibraryCallKit::inline_trig(vmIntrinsics::ID id) { |
1570 _sp += arg_size(); // restore stack pointer | 1477 Node* arg = round_double_node(argument(0)); |
1571 Node* arg = pop_math_arg(); | 1478 Node* n = NULL; |
1572 Node* trig = NULL; | |
1573 | 1479 |
1574 switch (id) { | 1480 switch (id) { |
1575 case vmIntrinsics::_dsin: | 1481 case vmIntrinsics::_dsin: n = new (C) SinDNode(arg); break; |
1576 trig = _gvn.transform((Node*)new (C) SinDNode(arg)); | 1482 case vmIntrinsics::_dcos: n = new (C) CosDNode(arg); break; |
1577 break; | 1483 case vmIntrinsics::_dtan: n = new (C) TanDNode(arg); break; |
1578 case vmIntrinsics::_dcos: | 1484 default: fatal_unexpected_iid(id); break; |
1579 trig = _gvn.transform((Node*)new (C) CosDNode(arg)); | 1485 } |
1580 break; | 1486 n = _gvn.transform(n); |
1581 case vmIntrinsics::_dtan: | |
1582 trig = _gvn.transform((Node*)new (C) TanDNode(arg)); | |
1583 break; | |
1584 default: | |
1585 assert(false, "bad intrinsic was passed in"); | |
1586 return false; | |
1587 } | |
1588 | 1487 |
1589 // Rounding required? Check for argument reduction! | 1488 // Rounding required? Check for argument reduction! |
1590 if( Matcher::strict_fp_requires_explicit_rounding ) { | 1489 if (Matcher::strict_fp_requires_explicit_rounding) { |
1591 | |
1592 static const double pi_4 = 0.7853981633974483; | 1490 static const double pi_4 = 0.7853981633974483; |
1593 static const double neg_pi_4 = -0.7853981633974483; | 1491 static const double neg_pi_4 = -0.7853981633974483; |
1594 // pi/2 in 80-bit extended precision | 1492 // pi/2 in 80-bit extended precision |
1595 // static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00}; | 1493 // static const unsigned char pi_2_bits_x[] = {0x35,0xc2,0x68,0x21,0xa2,0xda,0x0f,0xc9,0xff,0x3f,0x00,0x00,0x00,0x00,0x00,0x00}; |
1596 // -pi/2 in 80-bit extended precision | 1494 // -pi/2 in 80-bit extended precision |
1621 // requires a special machine instruction to load it. Instead we'll try | 1519 // requires a special machine instruction to load it. Instead we'll try |
1622 // the 'easy' case. If we really need the extra range +/- PI/2 we'll | 1520 // the 'easy' case. If we really need the extra range +/- PI/2 we'll |
1623 // probably do the math inside the SIN encoding. | 1521 // probably do the math inside the SIN encoding. |
1624 | 1522 |
1625 // Make the merge point | 1523 // Make the merge point |
1626 RegionNode *r = new (C) RegionNode(3); | 1524 RegionNode* r = new (C) RegionNode(3); |
1627 Node *phi = new (C) PhiNode(r,Type::DOUBLE); | 1525 Node* phi = new (C) PhiNode(r, Type::DOUBLE); |
1628 | 1526 |
1629 // Flatten arg so we need only 1 test | 1527 // Flatten arg so we need only 1 test |
1630 Node *abs = _gvn.transform(new (C) AbsDNode(arg)); | 1528 Node *abs = _gvn.transform(new (C) AbsDNode(arg)); |
1631 // Node for PI/4 constant | 1529 // Node for PI/4 constant |
1632 Node *pi4 = makecon(TypeD::make(pi_4)); | 1530 Node *pi4 = makecon(TypeD::make(pi_4)); |
1637 // Branch either way | 1535 // Branch either way |
1638 IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); | 1536 IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); |
1639 set_control(opt_iff(r,iff)); | 1537 set_control(opt_iff(r,iff)); |
1640 | 1538 |
1641 // Set fast path result | 1539 // Set fast path result |
1642 phi->init_req(2,trig); | 1540 phi->init_req(2, n); |
1643 | 1541 |
1644 // Slow path - non-blocking leaf call | 1542 // Slow path - non-blocking leaf call |
1645 Node* call = NULL; | 1543 Node* call = NULL; |
1646 switch (id) { | 1544 switch (id) { |
1647 case vmIntrinsics::_dsin: | 1545 case vmIntrinsics::_dsin: |
1659 CAST_FROM_FN_PTR(address, SharedRuntime::dtan), | 1557 CAST_FROM_FN_PTR(address, SharedRuntime::dtan), |
1660 "Tan", NULL, arg, top()); | 1558 "Tan", NULL, arg, top()); |
1661 break; | 1559 break; |
1662 } | 1560 } |
1663 assert(control()->in(0) == call, ""); | 1561 assert(control()->in(0) == call, ""); |
1664 Node* slow_result = _gvn.transform(new (C) ProjNode(call,TypeFunc::Parms)); | 1562 Node* slow_result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms)); |
1665 r->init_req(1,control()); | 1563 r->init_req(1, control()); |
1666 phi->init_req(1,slow_result); | 1564 phi->init_req(1, slow_result); |
1667 | 1565 |
1668 // Post-merge | 1566 // Post-merge |
1669 set_control(_gvn.transform(r)); | 1567 set_control(_gvn.transform(r)); |
1670 record_for_igvn(r); | 1568 record_for_igvn(r); |
1671 trig = _gvn.transform(phi); | 1569 n = _gvn.transform(phi); |
1672 | 1570 |
1673 C->set_has_split_ifs(true); // Has chance for split-if optimization | 1571 C->set_has_split_ifs(true); // Has chance for split-if optimization |
1674 } | 1572 } |
1675 // Push result back on JVM stack | 1573 set_result(n); |
1676 push_pair(trig); | |
1677 return true; | |
1678 } | |
1679 | |
1680 //------------------------------inline_sqrt------------------------------------- | |
1681 // Inline square root instruction, if possible. | |
1682 bool LibraryCallKit::inline_sqrt(vmIntrinsics::ID id) { | |
1683 assert(id == vmIntrinsics::_dsqrt, "Not square root"); | |
1684 _sp += arg_size(); // restore stack pointer | |
1685 push_pair(_gvn.transform(new (C) SqrtDNode(0, pop_math_arg()))); | |
1686 return true; | |
1687 } | |
1688 | |
1689 //------------------------------inline_abs------------------------------------- | |
1690 // Inline absolute value instruction, if possible. | |
1691 bool LibraryCallKit::inline_abs(vmIntrinsics::ID id) { | |
1692 assert(id == vmIntrinsics::_dabs, "Not absolute value"); | |
1693 _sp += arg_size(); // restore stack pointer | |
1694 push_pair(_gvn.transform(new (C) AbsDNode(pop_math_arg()))); | |
1695 return true; | 1574 return true; |
1696 } | 1575 } |
1697 | 1576 |
1698 void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) { | 1577 void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) { |
1699 //------------------- | 1578 //------------------- |
1700 //result=(result.isNaN())? funcAddr():result; | 1579 //result=(result.isNaN())? funcAddr():result; |
1701 // Check: If isNaN() by checking result!=result? then either trap | 1580 // Check: If isNaN() by checking result!=result? then either trap |
1702 // or go to runtime | 1581 // or go to runtime |
1703 Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result,result)); | 1582 Node* cmpisnan = _gvn.transform(new (C) CmpDNode(result, result)); |
1704 // Build the boolean node | 1583 // Build the boolean node |
1705 Node* bolisnum = _gvn.transform( new (C) BoolNode(cmpisnan, BoolTest::eq) ); | 1584 Node* bolisnum = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::eq)); |
1706 | 1585 |
1707 if (!too_many_traps(Deoptimization::Reason_intrinsic)) { | 1586 if (!too_many_traps(Deoptimization::Reason_intrinsic)) { |
1708 { | 1587 { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT); |
1709 BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT); | |
1710 // End the current control-flow path | |
1711 push_pair(x); | |
1712 if (y != NULL) { | |
1713 push_pair(y); | |
1714 } | |
1715 // The pow or exp intrinsic returned a NaN, which requires a call | 1588 // The pow or exp intrinsic returned a NaN, which requires a call |
1716 // to the runtime. Recompile with the runtime call. | 1589 // to the runtime. Recompile with the runtime call. |
1717 uncommon_trap(Deoptimization::Reason_intrinsic, | 1590 uncommon_trap(Deoptimization::Reason_intrinsic, |
1718 Deoptimization::Action_make_not_entrant); | 1591 Deoptimization::Action_make_not_entrant); |
1719 } | 1592 } |
1720 push_pair(result); | 1593 set_result(result); |
1721 } else { | 1594 } else { |
1722 // If this inlining ever returned NaN in the past, we compile a call | 1595 // If this inlining ever returned NaN in the past, we compile a call |
1723 // to the runtime to properly handle corner cases | 1596 // to the runtime to properly handle corner cases |
1724 | 1597 |
1725 IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); | 1598 IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); |
1726 Node* if_slow = _gvn.transform( new (C) IfFalseNode(iff) ); | 1599 Node* if_slow = _gvn.transform( new (C) IfFalseNode(iff) ); |
1727 Node* if_fast = _gvn.transform( new (C) IfTrueNode(iff) ); | 1600 Node* if_fast = _gvn.transform( new (C) IfTrueNode(iff) ); |
1728 | 1601 |
1729 if (!if_slow->is_top()) { | 1602 if (!if_slow->is_top()) { |
1730 RegionNode* result_region = new(C) RegionNode(3); | 1603 RegionNode* result_region = new (C) RegionNode(3); |
1731 PhiNode* result_val = new (C) PhiNode(result_region, Type::DOUBLE); | 1604 PhiNode* result_val = new (C) PhiNode(result_region, Type::DOUBLE); |
1732 | 1605 |
1733 result_region->init_req(1, if_fast); | 1606 result_region->init_req(1, if_fast); |
1734 result_val->init_req(1, result); | 1607 result_val->init_req(1, result); |
1735 | 1608 |
1745 assert(value_top == top(), "second value must be top"); | 1618 assert(value_top == top(), "second value must be top"); |
1746 #endif | 1619 #endif |
1747 | 1620 |
1748 result_region->init_req(2, control()); | 1621 result_region->init_req(2, control()); |
1749 result_val->init_req(2, value); | 1622 result_val->init_req(2, value); |
1750 push_result(result_region, result_val); | 1623 set_result(result_region, result_val); |
1751 } else { | 1624 } else { |
1752 push_pair(result); | 1625 set_result(result); |
1753 } | 1626 } |
1754 } | 1627 } |
1755 } | 1628 } |
1756 | 1629 |
1757 //------------------------------inline_exp------------------------------------- | 1630 //------------------------------inline_exp------------------------------------- |
1758 // Inline exp instructions, if possible. The Intel hardware only misses | 1631 // Inline exp instructions, if possible. The Intel hardware only misses |
1759 // really odd corner cases (+/- Infinity). Just uncommon-trap them. | 1632 // really odd corner cases (+/- Infinity). Just uncommon-trap them. |
1760 bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) { | 1633 bool LibraryCallKit::inline_exp() { |
1761 assert(id == vmIntrinsics::_dexp, "Not exp"); | 1634 Node* arg = round_double_node(argument(0)); |
1762 | 1635 Node* n = _gvn.transform(new (C) ExpDNode(0, arg)); |
1763 _sp += arg_size(); // restore stack pointer | 1636 |
1764 Node *x = pop_math_arg(); | 1637 finish_pow_exp(n, arg, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP"); |
1765 Node *result = _gvn.transform(new (C) ExpDNode(0,x)); | |
1766 | |
1767 finish_pow_exp(result, x, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP"); | |
1768 | 1638 |
1769 C->set_has_split_ifs(true); // Has chance for split-if optimization | 1639 C->set_has_split_ifs(true); // Has chance for split-if optimization |
1770 | |
1771 return true; | 1640 return true; |
1772 } | 1641 } |
1773 | 1642 |
1774 //------------------------------inline_pow------------------------------------- | 1643 //------------------------------inline_pow------------------------------------- |
1775 // Inline power instructions, if possible. | 1644 // Inline power instructions, if possible. |
1776 bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) { | 1645 bool LibraryCallKit::inline_pow() { |
1777 assert(id == vmIntrinsics::_dpow, "Not pow"); | |
1778 | |
1779 // Pseudocode for pow | 1646 // Pseudocode for pow |
1780 // if (x <= 0.0) { | 1647 // if (x <= 0.0) { |
1781 // long longy = (long)y; | 1648 // long longy = (long)y; |
1782 // if ((double)longy == y) { // if y is long | 1649 // if ((double)longy == y) { // if y is long |
1783 // if (y + 1 == y) longy = 0; // huge number: even | 1650 // if (y + 1 == y) longy = 0; // huge number: even |
1791 // if (result != result)? { | 1658 // if (result != result)? { |
1792 // result = uncommon_trap() or runtime_call(); | 1659 // result = uncommon_trap() or runtime_call(); |
1793 // } | 1660 // } |
1794 // return result; | 1661 // return result; |
1795 | 1662 |
1796 _sp += arg_size(); // restore stack pointer | 1663 Node* x = round_double_node(argument(0)); |
1797 Node* y = pop_math_arg(); | 1664 Node* y = round_double_node(argument(2)); |
1798 Node* x = pop_math_arg(); | |
1799 | 1665 |
1800 Node* result = NULL; | 1666 Node* result = NULL; |
1801 | 1667 |
1802 if (!too_many_traps(Deoptimization::Reason_intrinsic)) { | 1668 if (!too_many_traps(Deoptimization::Reason_intrinsic)) { |
1803 // Short form: skip the fancy tests and just check for NaN result. | 1669 // Short form: skip the fancy tests and just check for NaN result. |
1804 result = _gvn.transform( new (C) PowDNode(0, x, y) ); | 1670 result = _gvn.transform(new (C) PowDNode(0, x, y)); |
1805 } else { | 1671 } else { |
1806 // If this inlining ever returned NaN in the past, include all | 1672 // If this inlining ever returned NaN in the past, include all |
1807 // checks + call to the runtime. | 1673 // checks + call to the runtime. |
1808 | 1674 |
1809 // Set the merge point for If node with condition of (x <= 0.0) | 1675 // Set the merge point for If node with condition of (x <= 0.0) |
1917 phi->init_req(1,slow_result); | 1783 phi->init_req(1,slow_result); |
1918 | 1784 |
1919 // Post merge | 1785 // Post merge |
1920 set_control(_gvn.transform(r)); | 1786 set_control(_gvn.transform(r)); |
1921 record_for_igvn(r); | 1787 record_for_igvn(r); |
1922 result=_gvn.transform(phi); | 1788 result = _gvn.transform(phi); |
1923 } | 1789 } |
1924 | 1790 |
1925 finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW"); | 1791 finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW"); |
1926 | 1792 |
1927 C->set_has_split_ifs(true); // Has chance for split-if optimization | 1793 C->set_has_split_ifs(true); // Has chance for split-if optimization |
1928 | |
1929 return true; | |
1930 } | |
1931 | |
1932 //------------------------------inline_trans------------------------------------- | |
1933 // Inline transcendental instructions, if possible. The Intel hardware gets | |
1934 // these right, no funny corner cases missed. | |
1935 bool LibraryCallKit::inline_trans(vmIntrinsics::ID id) { | |
1936 _sp += arg_size(); // restore stack pointer | |
1937 Node* arg = pop_math_arg(); | |
1938 Node* trans = NULL; | |
1939 | |
1940 switch (id) { | |
1941 case vmIntrinsics::_dlog: | |
1942 trans = _gvn.transform((Node*)new (C) LogDNode(arg)); | |
1943 break; | |
1944 case vmIntrinsics::_dlog10: | |
1945 trans = _gvn.transform((Node*)new (C) Log10DNode(arg)); | |
1946 break; | |
1947 default: | |
1948 assert(false, "bad intrinsic was passed in"); | |
1949 return false; | |
1950 } | |
1951 | |
1952 // Push result back on JVM stack | |
1953 push_pair(trans); | |
1954 return true; | 1794 return true; |
1955 } | 1795 } |
1956 | 1796 |
1957 //------------------------------runtime_math----------------------------- | 1797 //------------------------------runtime_math----------------------------- |
1958 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) { | 1798 bool LibraryCallKit::runtime_math(const TypeFunc* call_type, address funcAddr, const char* funcName) { |
1959 Node* a = NULL; | |
1960 Node* b = NULL; | |
1961 | |
1962 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(), | 1799 assert(call_type == OptoRuntime::Math_DD_D_Type() || call_type == OptoRuntime::Math_D_D_Type(), |
1963 "must be (DD)D or (D)D type"); | 1800 "must be (DD)D or (D)D type"); |
1964 | 1801 |
1965 // Inputs | 1802 // Inputs |
1966 _sp += arg_size(); // restore stack pointer | 1803 Node* a = round_double_node(argument(0)); |
1967 if (call_type == OptoRuntime::Math_DD_D_Type()) { | 1804 Node* b = (call_type == OptoRuntime::Math_DD_D_Type()) ? round_double_node(argument(2)) : NULL; |
1968 b = pop_math_arg(); | |
1969 } | |
1970 a = pop_math_arg(); | |
1971 | 1805 |
1972 const TypePtr* no_memory_effects = NULL; | 1806 const TypePtr* no_memory_effects = NULL; |
1973 Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName, | 1807 Node* trig = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName, |
1974 no_memory_effects, | 1808 no_memory_effects, |
1975 a, top(), b, b ? top() : NULL); | 1809 a, top(), b, b ? top() : NULL); |
1977 #ifdef ASSERT | 1811 #ifdef ASSERT |
1978 Node* value_top = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+1)); | 1812 Node* value_top = _gvn.transform(new (C) ProjNode(trig, TypeFunc::Parms+1)); |
1979 assert(value_top == top(), "second value must be top"); | 1813 assert(value_top == top(), "second value must be top"); |
1980 #endif | 1814 #endif |
1981 | 1815 |
1982 push_pair(value); | 1816 set_result(value); |
1983 return true; | 1817 return true; |
1984 } | 1818 } |
1985 | 1819 |
1986 //------------------------------inline_math_native----------------------------- | 1820 //------------------------------inline_math_native----------------------------- |
1987 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) { | 1821 bool LibraryCallKit::inline_math_native(vmIntrinsics::ID id) { |
1822 #define FN_PTR(f) CAST_FROM_FN_PTR(address, f) | |
1988 switch (id) { | 1823 switch (id) { |
1989 // These intrinsics are not properly supported on all hardware | 1824 // These intrinsics are not properly supported on all hardware |
1990 case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) : | 1825 case vmIntrinsics::_dcos: return Matcher::has_match_rule(Op_CosD) ? inline_trig(id) : |
1991 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dcos), "COS"); | 1826 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dcos), "COS"); |
1992 case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) : | 1827 case vmIntrinsics::_dsin: return Matcher::has_match_rule(Op_SinD) ? inline_trig(id) : |
1993 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dsin), "SIN"); | 1828 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dsin), "SIN"); |
1994 case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) : | 1829 case vmIntrinsics::_dtan: return Matcher::has_match_rule(Op_TanD) ? inline_trig(id) : |
1995 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dtan), "TAN"); | 1830 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dtan), "TAN"); |
1996 | 1831 |
1997 case vmIntrinsics::_dlog: return Matcher::has_match_rule(Op_LogD) ? inline_trans(id) : | 1832 case vmIntrinsics::_dlog: return Matcher::has_match_rule(Op_LogD) ? inline_math(id) : |
1998 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog), "LOG"); | 1833 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog), "LOG"); |
1999 case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_trans(id) : | 1834 case vmIntrinsics::_dlog10: return Matcher::has_match_rule(Op_Log10D) ? inline_math(id) : |
2000 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dlog10), "LOG10"); | 1835 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10"); |
2001 | 1836 |
2002 // These intrinsics are supported on all hardware | 1837 // These intrinsics are supported on all hardware |
2003 case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_sqrt(id) : false; | 1838 case vmIntrinsics::_dsqrt: return Matcher::has_match_rule(Op_SqrtD) ? inline_math(id) : false; |
2004 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_abs(id) : false; | 1839 case vmIntrinsics::_dabs: return Matcher::has_match_rule(Op_AbsD) ? inline_math(id) : false; |
2005 | 1840 |
2006 case vmIntrinsics::_dexp: return | 1841 case vmIntrinsics::_dexp: return Matcher::has_match_rule(Op_ExpD) ? inline_exp() : |
2007 Matcher::has_match_rule(Op_ExpD) ? inline_exp(id) : | 1842 runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dexp), "EXP"); |
2008 runtime_math(OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP"); | 1843 case vmIntrinsics::_dpow: return Matcher::has_match_rule(Op_PowD) ? inline_pow() : |
2009 case vmIntrinsics::_dpow: return | 1844 runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow), "POW"); |
2010 Matcher::has_match_rule(Op_PowD) ? inline_pow(id) : | 1845 #undef FN_PTR |
2011 runtime_math(OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW"); | |
2012 | 1846 |
2013 // These intrinsics are not yet correctly implemented | 1847 // These intrinsics are not yet correctly implemented |
2014 case vmIntrinsics::_datan2: | 1848 case vmIntrinsics::_datan2: |
2015 return false; | 1849 return false; |
2016 | 1850 |
2017 default: | 1851 default: |
2018 ShouldNotReachHere(); | 1852 fatal_unexpected_iid(id); |
2019 return false; | 1853 return false; |
2020 } | 1854 } |
2021 } | 1855 } |
2022 | 1856 |
2023 static bool is_simple_name(Node* n) { | 1857 static bool is_simple_name(Node* n) { |
2028 ); | 1862 ); |
2029 } | 1863 } |
2030 | 1864 |
2031 //----------------------------inline_min_max----------------------------------- | 1865 //----------------------------inline_min_max----------------------------------- |
2032 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) { | 1866 bool LibraryCallKit::inline_min_max(vmIntrinsics::ID id) { |
2033 push(generate_min_max(id, argument(0), argument(1))); | 1867 set_result(generate_min_max(id, argument(0), argument(1))); |
2034 | |
2035 return true; | 1868 return true; |
2036 } | 1869 } |
2037 | 1870 |
2038 Node* | 1871 Node* |
2039 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) { | 1872 LibraryCallKit::generate_min_max(vmIntrinsics::ID id, Node* x0, Node* y0) { |
2252 } else { | 2085 } else { |
2253 return basic_plus_adr(base, offset); | 2086 return basic_plus_adr(base, offset); |
2254 } | 2087 } |
2255 } | 2088 } |
2256 | 2089 |
2257 //-------------------inline_numberOfLeadingZeros_int/long----------------------- | 2090 //--------------------------inline_number_methods----------------------------- |
2258 // inline int Integer.numberOfLeadingZeros(int) | 2091 // inline int Integer.numberOfLeadingZeros(int) |
2259 // inline int Long.numberOfLeadingZeros(long) | 2092 // inline int Long.numberOfLeadingZeros(long) |
2260 bool LibraryCallKit::inline_numberOfLeadingZeros(vmIntrinsics::ID id) { | 2093 // |
2261 assert(id == vmIntrinsics::_numberOfLeadingZeros_i || id == vmIntrinsics::_numberOfLeadingZeros_l, "not numberOfLeadingZeros"); | 2094 // inline int Integer.numberOfTrailingZeros(int) |
2262 if (id == vmIntrinsics::_numberOfLeadingZeros_i && !Matcher::match_rule_supported(Op_CountLeadingZerosI)) return false; | 2095 // inline int Long.numberOfTrailingZeros(long) |
2263 if (id == vmIntrinsics::_numberOfLeadingZeros_l && !Matcher::match_rule_supported(Op_CountLeadingZerosL)) return false; | 2096 // |
2264 _sp += arg_size(); // restore stack pointer | 2097 // inline int Integer.bitCount(int) |
2098 // inline int Long.bitCount(long) | |
2099 // | |
2100 // inline char Character.reverseBytes(char) | |
2101 // inline short Short.reverseBytes(short) | |
2102 // inline int Integer.reverseBytes(int) | |
2103 // inline long Long.reverseBytes(long) | |
2104 bool LibraryCallKit::inline_number_methods(vmIntrinsics::ID id) { | |
2105 Node* arg = argument(0); | |
2106 Node* n; | |
2265 switch (id) { | 2107 switch (id) { |
2266 case vmIntrinsics::_numberOfLeadingZeros_i: | 2108 case vmIntrinsics::_numberOfLeadingZeros_i: n = new (C) CountLeadingZerosINode( arg); break; |
2267 push(_gvn.transform(new (C) CountLeadingZerosINode(pop()))); | 2109 case vmIntrinsics::_numberOfLeadingZeros_l: n = new (C) CountLeadingZerosLNode( arg); break; |
2268 break; | 2110 case vmIntrinsics::_numberOfTrailingZeros_i: n = new (C) CountTrailingZerosINode(arg); break; |
2269 case vmIntrinsics::_numberOfLeadingZeros_l: | 2111 case vmIntrinsics::_numberOfTrailingZeros_l: n = new (C) CountTrailingZerosLNode(arg); break; |
2270 push(_gvn.transform(new (C) CountLeadingZerosLNode(pop_pair()))); | 2112 case vmIntrinsics::_bitCount_i: n = new (C) PopCountINode( arg); break; |
2271 break; | 2113 case vmIntrinsics::_bitCount_l: n = new (C) PopCountLNode( arg); break; |
2272 default: | 2114 case vmIntrinsics::_reverseBytes_c: n = new (C) ReverseBytesUSNode(0, arg); break; |
2273 ShouldNotReachHere(); | 2115 case vmIntrinsics::_reverseBytes_s: n = new (C) ReverseBytesSNode( 0, arg); break; |
2274 } | 2116 case vmIntrinsics::_reverseBytes_i: n = new (C) ReverseBytesINode( 0, arg); break; |
2275 return true; | 2117 case vmIntrinsics::_reverseBytes_l: n = new (C) ReverseBytesLNode( 0, arg); break; |
2276 } | 2118 default: fatal_unexpected_iid(id); break; |
2277 | 2119 } |
2278 //-------------------inline_numberOfTrailingZeros_int/long---------------------- | 2120 set_result(_gvn.transform(n)); |
2279 // inline int Integer.numberOfTrailingZeros(int) | |
2280 // inline int Long.numberOfTrailingZeros(long) | |
2281 bool LibraryCallKit::inline_numberOfTrailingZeros(vmIntrinsics::ID id) { | |
2282 assert(id == vmIntrinsics::_numberOfTrailingZeros_i || id == vmIntrinsics::_numberOfTrailingZeros_l, "not numberOfTrailingZeros"); | |
2283 if (id == vmIntrinsics::_numberOfTrailingZeros_i && !Matcher::match_rule_supported(Op_CountTrailingZerosI)) return false; | |
2284 if (id == vmIntrinsics::_numberOfTrailingZeros_l && !Matcher::match_rule_supported(Op_CountTrailingZerosL)) return false; | |
2285 _sp += arg_size(); // restore stack pointer | |
2286 switch (id) { | |
2287 case vmIntrinsics::_numberOfTrailingZeros_i: | |
2288 push(_gvn.transform(new (C) CountTrailingZerosINode(pop()))); | |
2289 break; | |
2290 case vmIntrinsics::_numberOfTrailingZeros_l: | |
2291 push(_gvn.transform(new (C) CountTrailingZerosLNode(pop_pair()))); | |
2292 break; | |
2293 default: | |
2294 ShouldNotReachHere(); | |
2295 } | |
2296 return true; | |
2297 } | |
2298 | |
2299 //----------------------------inline_bitCount_int/long----------------------- | |
2300 // inline int Integer.bitCount(int) | |
2301 // inline int Long.bitCount(long) | |
2302 bool LibraryCallKit::inline_bitCount(vmIntrinsics::ID id) { | |
2303 assert(id == vmIntrinsics::_bitCount_i || id == vmIntrinsics::_bitCount_l, "not bitCount"); | |
2304 if (id == vmIntrinsics::_bitCount_i && !Matcher::has_match_rule(Op_PopCountI)) return false; | |
2305 if (id == vmIntrinsics::_bitCount_l && !Matcher::has_match_rule(Op_PopCountL)) return false; | |
2306 _sp += arg_size(); // restore stack pointer | |
2307 switch (id) { | |
2308 case vmIntrinsics::_bitCount_i: | |
2309 push(_gvn.transform(new (C) PopCountINode(pop()))); | |
2310 break; | |
2311 case vmIntrinsics::_bitCount_l: | |
2312 push(_gvn.transform(new (C) PopCountLNode(pop_pair()))); | |
2313 break; | |
2314 default: | |
2315 ShouldNotReachHere(); | |
2316 } | |
2317 return true; | |
2318 } | |
2319 | |
2320 //----------------------------inline_reverseBytes_int/long/char/short------------------- | |
2321 // inline Integer.reverseBytes(int) | |
2322 // inline Long.reverseBytes(long) | |
2323 // inline Character.reverseBytes(char) | |
2324 // inline Short.reverseBytes(short) | |
2325 bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) { | |
2326 assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l || | |
2327 id == vmIntrinsics::_reverseBytes_c || id == vmIntrinsics::_reverseBytes_s, | |
2328 "not reverse Bytes"); | |
2329 if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false; | |
2330 if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false; | |
2331 if (id == vmIntrinsics::_reverseBytes_c && !Matcher::has_match_rule(Op_ReverseBytesUS)) return false; | |
2332 if (id == vmIntrinsics::_reverseBytes_s && !Matcher::has_match_rule(Op_ReverseBytesS)) return false; | |
2333 _sp += arg_size(); // restore stack pointer | |
2334 switch (id) { | |
2335 case vmIntrinsics::_reverseBytes_i: | |
2336 push(_gvn.transform(new (C) ReverseBytesINode(0, pop()))); | |
2337 break; | |
2338 case vmIntrinsics::_reverseBytes_l: | |
2339 push_pair(_gvn.transform(new (C) ReverseBytesLNode(0, pop_pair()))); | |
2340 break; | |
2341 case vmIntrinsics::_reverseBytes_c: | |
2342 push(_gvn.transform(new (C) ReverseBytesUSNode(0, pop()))); | |
2343 break; | |
2344 case vmIntrinsics::_reverseBytes_s: | |
2345 push(_gvn.transform(new (C) ReverseBytesSNode(0, pop()))); | |
2346 break; | |
2347 default: | |
2348 ; | |
2349 } | |
2350 return true; | 2121 return true; |
2351 } | 2122 } |
2352 | 2123 |
2353 //----------------------------inline_unsafe_access---------------------------- | 2124 //----------------------------inline_unsafe_access---------------------------- |
2354 | 2125 |
2355 const static BasicType T_ADDRESS_HOLDER = T_LONG; | 2126 const static BasicType T_ADDRESS_HOLDER = T_LONG; |
2356 | 2127 |
2357 // Helper that guards and inserts a pre-barrier. | 2128 // Helper that guards and inserts a pre-barrier. |
2358 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset, | 2129 void LibraryCallKit::insert_pre_barrier(Node* base_oop, Node* offset, |
2359 Node* pre_val, int nargs, bool need_mem_bar) { | 2130 Node* pre_val, bool need_mem_bar) { |
2360 // We could be accessing the referent field of a reference object. If so, when G1 | 2131 // We could be accessing the referent field of a reference object. If so, when G1 |
2361 // is enabled, we need to log the value in the referent field in an SATB buffer. | 2132 // is enabled, we need to log the value in the referent field in an SATB buffer. |
2362 // This routine performs some compile time filters and generates suitable | 2133 // This routine performs some compile time filters and generates suitable |
2363 // runtime filters that guard the pre-barrier code. | 2134 // runtime filters that guard the pre-barrier code. |
2364 // Also add memory barrier for non volatile load from the referent field | 2135 // Also add memory barrier for non volatile load from the referent field |
2404 // if (instance_of(base, java.lang.ref.Reference)) { | 2175 // if (instance_of(base, java.lang.ref.Reference)) { |
2405 // pre_barrier(_, pre_val, ...); | 2176 // pre_barrier(_, pre_val, ...); |
2406 // } | 2177 // } |
2407 // } | 2178 // } |
2408 | 2179 |
2409 float likely = PROB_LIKELY(0.999); | 2180 float likely = PROB_LIKELY( 0.999); |
2410 float unlikely = PROB_UNLIKELY(0.999); | 2181 float unlikely = PROB_UNLIKELY(0.999); |
2411 | 2182 |
2412 IdealKit ideal(this); | 2183 IdealKit ideal(this); |
2413 #define __ ideal. | 2184 #define __ ideal. |
2414 | 2185 |
2415 Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset); | 2186 Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset); |
2417 __ if_then(offset, BoolTest::eq, referent_off, unlikely); { | 2188 __ if_then(offset, BoolTest::eq, referent_off, unlikely); { |
2418 // Update graphKit memory and control from IdealKit. | 2189 // Update graphKit memory and control from IdealKit. |
2419 sync_kit(ideal); | 2190 sync_kit(ideal); |
2420 | 2191 |
2421 Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass())); | 2192 Node* ref_klass_con = makecon(TypeKlassPtr::make(env()->Reference_klass())); |
2422 _sp += nargs; // gen_instanceof might do an uncommon trap | |
2423 Node* is_instof = gen_instanceof(base_oop, ref_klass_con); | 2193 Node* is_instof = gen_instanceof(base_oop, ref_klass_con); |
2424 _sp -= nargs; | |
2425 | 2194 |
2426 // Update IdealKit memory and control from graphKit. | 2195 // Update IdealKit memory and control from graphKit. |
2427 __ sync_kit(this); | 2196 __ sync_kit(this); |
2428 | 2197 |
2429 Node* one = __ ConI(1); | 2198 Node* one = __ ConI(1); |
2503 | 2272 |
2504 #ifndef PRODUCT | 2273 #ifndef PRODUCT |
2505 { | 2274 { |
2506 ResourceMark rm; | 2275 ResourceMark rm; |
2507 // Check the signatures. | 2276 // Check the signatures. |
2508 ciSignature* sig = signature(); | 2277 ciSignature* sig = callee()->signature(); |
2509 #ifdef ASSERT | 2278 #ifdef ASSERT |
2510 if (!is_store) { | 2279 if (!is_store) { |
2511 // Object getObject(Object base, int/long offset), etc. | 2280 // Object getObject(Object base, int/long offset), etc. |
2512 BasicType rtype = sig->return_type()->basic_type(); | 2281 BasicType rtype = sig->return_type()->basic_type(); |
2513 if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name()) | 2282 if (rtype == T_ADDRESS_HOLDER && callee()->name() == ciSymbol::getAddress_name()) |
2541 } | 2310 } |
2542 #endif //PRODUCT | 2311 #endif //PRODUCT |
2543 | 2312 |
2544 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | 2313 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". |
2545 | 2314 |
2546 int type_words = type2size[ (type == T_ADDRESS) ? T_LONG : type ]; | 2315 Node* receiver = argument(0); // type: oop |
2547 | 2316 |
2548 // Argument words: "this" plus (oop/offset) or (lo/hi) args plus maybe 1 or 2 value words | 2317 // Build address expression. See the code in inline_unsafe_prefetch. |
2549 int nargs = 1 + (is_native_ptr ? 2 : 3) + (is_store ? type_words : 0); | 2318 Node* adr; |
2550 assert(callee()->arg_size() == nargs, "must be"); | 2319 Node* heap_base_oop = top(); |
2551 | 2320 Node* offset = top(); |
2552 debug_only(int saved_sp = _sp); | |
2553 _sp += nargs; | |
2554 | |
2555 Node* val; | 2321 Node* val; |
2556 debug_only(val = (Node*)(uintptr_t)-1); | |
2557 | |
2558 | |
2559 if (is_store) { | |
2560 // Get the value being stored. (Pop it first; it was pushed last.) | |
2561 switch (type) { | |
2562 case T_DOUBLE: | |
2563 case T_LONG: | |
2564 case T_ADDRESS: | |
2565 val = pop_pair(); | |
2566 break; | |
2567 default: | |
2568 val = pop(); | |
2569 } | |
2570 } | |
2571 | |
2572 // Build address expression. See the code in inline_unsafe_prefetch. | |
2573 Node *adr; | |
2574 Node *heap_base_oop = top(); | |
2575 Node* offset = top(); | |
2576 | 2322 |
2577 if (!is_native_ptr) { | 2323 if (!is_native_ptr) { |
2324 // The base is either a Java object or a value produced by Unsafe.staticFieldBase | |
2325 Node* base = argument(1); // type: oop | |
2578 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset | 2326 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset |
2579 offset = pop_pair(); | 2327 offset = argument(2); // type: long |
2580 // The base is either a Java object or a value produced by Unsafe.staticFieldBase | |
2581 Node* base = pop(); | |
2582 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset | 2328 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset |
2583 // to be plain byte offsets, which are also the same as those accepted | 2329 // to be plain byte offsets, which are also the same as those accepted |
2584 // by oopDesc::field_base. | 2330 // by oopDesc::field_base. |
2585 assert(Unsafe_field_offset_to_byte_offset(11) == 11, | 2331 assert(Unsafe_field_offset_to_byte_offset(11) == 11, |
2586 "fieldOffset must be byte-scaled"); | 2332 "fieldOffset must be byte-scaled"); |
2587 // 32-bit machines ignore the high half! | 2333 // 32-bit machines ignore the high half! |
2588 offset = ConvL2X(offset); | 2334 offset = ConvL2X(offset); |
2589 adr = make_unsafe_address(base, offset); | 2335 adr = make_unsafe_address(base, offset); |
2590 heap_base_oop = base; | 2336 heap_base_oop = base; |
2337 val = is_store ? argument(4) : NULL; | |
2591 } else { | 2338 } else { |
2592 Node* ptr = pop_pair(); | 2339 Node* ptr = argument(1); // type: long |
2593 // Adjust Java long to machine word: | 2340 ptr = ConvL2X(ptr); // adjust Java long to machine word |
2594 ptr = ConvL2X(ptr); | |
2595 adr = make_unsafe_address(NULL, ptr); | 2341 adr = make_unsafe_address(NULL, ptr); |
2596 } | 2342 val = is_store ? argument(3) : NULL; |
2597 | 2343 } |
2598 // Pop receiver last: it was pushed first. | |
2599 Node *receiver = pop(); | |
2600 | |
2601 assert(saved_sp == _sp, "must have correct argument count"); | |
2602 | 2344 |
2603 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); | 2345 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); |
2604 | 2346 |
2605 // First guess at the value type. | 2347 // First guess at the value type. |
2606 const Type *value_type = Type::get_const_basic_type(type); | 2348 const Type *value_type = Type::get_const_basic_type(type); |
2631 if (tjp != NULL) { | 2373 if (tjp != NULL) { |
2632 value_type = tjp; | 2374 value_type = tjp; |
2633 } | 2375 } |
2634 } | 2376 } |
2635 | 2377 |
2636 // Null check on self without removing any arguments. The argument | 2378 receiver = null_check(receiver); |
2637 // null check technically happens in the wrong place, which can lead to | |
2638 // invalid stack traces when the primitive is inlined into a method | |
2639 // which handles NullPointerExceptions. | |
2640 _sp += nargs; | |
2641 do_null_check(receiver, T_OBJECT); | |
2642 _sp -= nargs; | |
2643 if (stopped()) { | 2379 if (stopped()) { |
2644 return true; | 2380 return true; |
2645 } | 2381 } |
2646 // Heap pointers get a null-check from the interpreter, | 2382 // Heap pointers get a null-check from the interpreter, |
2647 // as a courtesy. However, this is not guaranteed by Unsafe, | 2383 // as a courtesy. However, this is not guaranteed by Unsafe, |
2669 // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl. | 2405 // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl. |
2670 if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder); | 2406 if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder); |
2671 | 2407 |
2672 if (!is_store) { | 2408 if (!is_store) { |
2673 Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile); | 2409 Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile); |
2674 // load value and push onto stack | 2410 // load value |
2675 switch (type) { | 2411 switch (type) { |
2676 case T_BOOLEAN: | 2412 case T_BOOLEAN: |
2677 case T_CHAR: | 2413 case T_CHAR: |
2678 case T_BYTE: | 2414 case T_BYTE: |
2679 case T_SHORT: | 2415 case T_SHORT: |
2680 case T_INT: | 2416 case T_INT: |
2417 case T_LONG: | |
2681 case T_FLOAT: | 2418 case T_FLOAT: |
2682 push(p); | 2419 case T_DOUBLE: |
2683 break; | 2420 break; |
2684 case T_OBJECT: | 2421 case T_OBJECT: |
2685 if (need_read_barrier) { | 2422 if (need_read_barrier) { |
2686 insert_pre_barrier(heap_base_oop, offset, p, nargs, !(is_volatile || need_mem_bar)); | 2423 insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar)); |
2687 } | 2424 } |
2688 push(p); | |
2689 break; | 2425 break; |
2690 case T_ADDRESS: | 2426 case T_ADDRESS: |
2691 // Cast to an int type. | 2427 // Cast to an int type. |
2692 p = _gvn.transform( new (C) CastP2XNode(NULL,p) ); | 2428 p = _gvn.transform(new (C) CastP2XNode(NULL, p)); |
2693 p = ConvX2L(p); | 2429 p = ConvX2L(p); |
2694 push_pair(p); | |
2695 break; | 2430 break; |
2696 case T_DOUBLE: | 2431 default: |
2697 case T_LONG: | 2432 fatal(err_msg_res("unexpected type %d: %s", type, type2name(type))); |
2698 push_pair( p ); | |
2699 break; | 2433 break; |
2700 default: ShouldNotReachHere(); | 2434 } |
2701 } | 2435 // The load node has the control of the preceding MemBarCPUOrder. All |
2436 // following nodes will have the control of the MemBarCPUOrder inserted at | |
2437 // the end of this method. So, pushing the load onto the stack at a later | |
2438 // point is fine. | |
2439 set_result(p); | |
2702 } else { | 2440 } else { |
2703 // place effect of store into memory | 2441 // place effect of store into memory |
2704 switch (type) { | 2442 switch (type) { |
2705 case T_DOUBLE: | 2443 case T_DOUBLE: |
2706 val = dstore_rounding(val); | 2444 val = dstore_rounding(val); |
2760 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) { | 2498 bool LibraryCallKit::inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static) { |
2761 #ifndef PRODUCT | 2499 #ifndef PRODUCT |
2762 { | 2500 { |
2763 ResourceMark rm; | 2501 ResourceMark rm; |
2764 // Check the signatures. | 2502 // Check the signatures. |
2765 ciSignature* sig = signature(); | 2503 ciSignature* sig = callee()->signature(); |
2766 #ifdef ASSERT | 2504 #ifdef ASSERT |
2767 // Object getObject(Object base, int/long offset), etc. | 2505 // Object getObject(Object base, int/long offset), etc. |
2768 BasicType rtype = sig->return_type()->basic_type(); | 2506 BasicType rtype = sig->return_type()->basic_type(); |
2769 if (!is_native_ptr) { | 2507 if (!is_native_ptr) { |
2770 assert(sig->count() == 2, "oop prefetch has 2 arguments"); | 2508 assert(sig->count() == 2, "oop prefetch has 2 arguments"); |
2778 } | 2516 } |
2779 #endif // !PRODUCT | 2517 #endif // !PRODUCT |
2780 | 2518 |
2781 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | 2519 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". |
2782 | 2520 |
2783 // Argument words: "this" if not static, plus (oop/offset) or (lo/hi) args | 2521 const int idx = is_static ? 0 : 1; |
2784 int nargs = (is_static ? 0 : 1) + (is_native_ptr ? 2 : 3); | 2522 if (!is_static) { |
2785 | 2523 null_check_receiver(); |
2786 debug_only(int saved_sp = _sp); | 2524 if (stopped()) { |
2787 _sp += nargs; | 2525 return true; |
2526 } | |
2527 } | |
2788 | 2528 |
2789 // Build address expression. See the code in inline_unsafe_access. | 2529 // Build address expression. See the code in inline_unsafe_access. |
2790 Node *adr; | 2530 Node *adr; |
2791 if (!is_native_ptr) { | 2531 if (!is_native_ptr) { |
2532 // The base is either a Java object or a value produced by Unsafe.staticFieldBase | |
2533 Node* base = argument(idx + 0); // type: oop | |
2792 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset | 2534 // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset |
2793 Node* offset = pop_pair(); | 2535 Node* offset = argument(idx + 1); // type: long |
2794 // The base is either a Java object or a value produced by Unsafe.staticFieldBase | |
2795 Node* base = pop(); | |
2796 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset | 2536 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset |
2797 // to be plain byte offsets, which are also the same as those accepted | 2537 // to be plain byte offsets, which are also the same as those accepted |
2798 // by oopDesc::field_base. | 2538 // by oopDesc::field_base. |
2799 assert(Unsafe_field_offset_to_byte_offset(11) == 11, | 2539 assert(Unsafe_field_offset_to_byte_offset(11) == 11, |
2800 "fieldOffset must be byte-scaled"); | 2540 "fieldOffset must be byte-scaled"); |
2801 // 32-bit machines ignore the high half! | 2541 // 32-bit machines ignore the high half! |
2802 offset = ConvL2X(offset); | 2542 offset = ConvL2X(offset); |
2803 adr = make_unsafe_address(base, offset); | 2543 adr = make_unsafe_address(base, offset); |
2804 } else { | 2544 } else { |
2805 Node* ptr = pop_pair(); | 2545 Node* ptr = argument(idx + 0); // type: long |
2806 // Adjust Java long to machine word: | 2546 ptr = ConvL2X(ptr); // adjust Java long to machine word |
2807 ptr = ConvL2X(ptr); | |
2808 adr = make_unsafe_address(NULL, ptr); | 2547 adr = make_unsafe_address(NULL, ptr); |
2809 } | |
2810 | |
2811 if (is_static) { | |
2812 assert(saved_sp == _sp, "must have correct argument count"); | |
2813 } else { | |
2814 // Pop receiver last: it was pushed first. | |
2815 Node *receiver = pop(); | |
2816 assert(saved_sp == _sp, "must have correct argument count"); | |
2817 | |
2818 // Null check on self without removing any arguments. The argument | |
2819 // null check technically happens in the wrong place, which can lead to | |
2820 // invalid stack traces when the primitive is inlined into a method | |
2821 // which handles NullPointerExceptions. | |
2822 _sp += nargs; | |
2823 do_null_check(receiver, T_OBJECT); | |
2824 _sp -= nargs; | |
2825 if (stopped()) { | |
2826 return true; | |
2827 } | |
2828 } | 2548 } |
2829 | 2549 |
2830 // Generate the read or write prefetch | 2550 // Generate the read or write prefetch |
2831 Node *prefetch; | 2551 Node *prefetch; |
2832 if (is_store) { | 2552 if (is_store) { |
2839 | 2559 |
2840 return true; | 2560 return true; |
2841 } | 2561 } |
2842 | 2562 |
2843 //----------------------------inline_unsafe_load_store---------------------------- | 2563 //----------------------------inline_unsafe_load_store---------------------------- |
2844 | 2564 // This method serves a couple of different customers (depending on LoadStoreKind): |
2565 // | |
2566 // LS_cmpxchg: | |
2567 // public final native boolean compareAndSwapObject(Object o, long offset, Object expected, Object x); | |
2568 // public final native boolean compareAndSwapInt( Object o, long offset, int expected, int x); | |
2569 // public final native boolean compareAndSwapLong( Object o, long offset, long expected, long x); | |
2570 // | |
2571 // LS_xadd: | |
2572 // public int getAndAddInt( Object o, long offset, int delta) | |
2573 // public long getAndAddLong(Object o, long offset, long delta) | |
2574 // | |
2575 // LS_xchg: | |
2576 // int getAndSet(Object o, long offset, int newValue) | |
2577 // long getAndSet(Object o, long offset, long newValue) | |
2578 // Object getAndSet(Object o, long offset, Object newValue) | |
2579 // | |
2845 bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) { | 2580 bool LibraryCallKit::inline_unsafe_load_store(BasicType type, LoadStoreKind kind) { |
2846 // This basic scheme here is the same as inline_unsafe_access, but | 2581 // This basic scheme here is the same as inline_unsafe_access, but |
2847 // differs in enough details that combining them would make the code | 2582 // differs in enough details that combining them would make the code |
2848 // overly confusing. (This is a true fact! I originally combined | 2583 // overly confusing. (This is a true fact! I originally combined |
2849 // them, but even I was confused by it!) As much code/comments as | 2584 // them, but even I was confused by it!) As much code/comments as |
2854 | 2589 |
2855 #ifndef PRODUCT | 2590 #ifndef PRODUCT |
2856 BasicType rtype; | 2591 BasicType rtype; |
2857 { | 2592 { |
2858 ResourceMark rm; | 2593 ResourceMark rm; |
2859 ciSignature* sig = signature(); | 2594 // Check the signatures. |
2595 ciSignature* sig = callee()->signature(); | |
2860 rtype = sig->return_type()->basic_type(); | 2596 rtype = sig->return_type()->basic_type(); |
2861 if (kind == LS_xadd || kind == LS_xchg) { | 2597 if (kind == LS_xadd || kind == LS_xchg) { |
2862 // Check the signatures. | 2598 // Check the signatures. |
2863 #ifdef ASSERT | 2599 #ifdef ASSERT |
2864 assert(rtype == type, "get and set must return the expected type"); | 2600 assert(rtype == type, "get and set must return the expected type"); |
2879 ShouldNotReachHere(); | 2615 ShouldNotReachHere(); |
2880 } | 2616 } |
2881 } | 2617 } |
2882 #endif //PRODUCT | 2618 #endif //PRODUCT |
2883 | 2619 |
2884 // number of stack slots per value argument (1 or 2) | |
2885 int type_words = type2size[type]; | |
2886 | |
2887 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | 2620 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". |
2888 | 2621 |
2889 // Argument words: "this" plus oop plus offset (plus oldvalue) plus newvalue/delta; | 2622 // Get arguments: |
2890 int nargs = 1 + 1 + 2 + ((kind == LS_cmpxchg) ? type_words : 0) + type_words; | 2623 Node* receiver = NULL; |
2891 | 2624 Node* base = NULL; |
2892 // pop arguments: newval, offset, base, and receiver | 2625 Node* offset = NULL; |
2893 debug_only(int saved_sp = _sp); | 2626 Node* oldval = NULL; |
2894 _sp += nargs; | 2627 Node* newval = NULL; |
2895 Node* newval = (type_words == 1) ? pop() : pop_pair(); | 2628 if (kind == LS_cmpxchg) { |
2896 Node* oldval = (kind == LS_cmpxchg) ? ((type_words == 1) ? pop() : pop_pair()) : NULL; | 2629 const bool two_slot_type = type2size[type] == 2; |
2897 Node *offset = pop_pair(); | 2630 receiver = argument(0); // type: oop |
2898 Node *base = pop(); | 2631 base = argument(1); // type: oop |
2899 Node *receiver = pop(); | 2632 offset = argument(2); // type: long |
2900 assert(saved_sp == _sp, "must have correct argument count"); | 2633 oldval = argument(4); // type: oop, int, or long |
2901 | 2634 newval = argument(two_slot_type ? 6 : 5); // type: oop, int, or long |
2902 // Null check receiver. | 2635 } else if (kind == LS_xadd || kind == LS_xchg){ |
2903 _sp += nargs; | 2636 receiver = argument(0); // type: oop |
2904 do_null_check(receiver, T_OBJECT); | 2637 base = argument(1); // type: oop |
2905 _sp -= nargs; | 2638 offset = argument(2); // type: long |
2639 oldval = NULL; | |
2640 newval = argument(4); // type: oop, int, or long | |
2641 } | |
2642 | |
2643 // Null check receiver. | |
2644 receiver = null_check(receiver); | |
2906 if (stopped()) { | 2645 if (stopped()) { |
2907 return true; | 2646 return true; |
2908 } | 2647 } |
2909 | 2648 |
2910 // Build field offset expression. | 2649 // Build field offset expression. |
3006 } | 2745 } |
3007 } | 2746 } |
3008 post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true); | 2747 post_barrier(control(), load_store, base, adr, alias_idx, newval, T_OBJECT, true); |
3009 break; | 2748 break; |
3010 default: | 2749 default: |
3011 ShouldNotReachHere(); | 2750 fatal(err_msg_res("unexpected type %d: %s", type, type2name(type))); |
3012 break; | 2751 break; |
3013 } | 2752 } |
3014 | 2753 |
3015 // SCMemProjNodes represent the memory state of a LoadStore. Their | 2754 // SCMemProjNodes represent the memory state of a LoadStore. Their |
3016 // main role is to prevent LoadStore nodes from being optimized away | 2755 // main role is to prevent LoadStore nodes from being optimized away |
3027 load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->bottom_type()->make_ptr())); | 2766 load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->bottom_type()->make_ptr())); |
3028 } | 2767 } |
3029 #endif | 2768 #endif |
3030 | 2769 |
3031 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match"); | 2770 assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match"); |
3032 push_node(load_store->bottom_type()->basic_type(), load_store); | 2771 set_result(load_store); |
3033 return true; | 2772 return true; |
3034 } | 2773 } |
3035 | 2774 |
2775 //----------------------------inline_unsafe_ordered_store---------------------- | |
2776 // public native void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x); | |
2777 // public native void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x); | |
2778 // public native void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x); | |
3036 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) { | 2779 bool LibraryCallKit::inline_unsafe_ordered_store(BasicType type) { |
3037 // This is another variant of inline_unsafe_access, differing in | 2780 // This is another variant of inline_unsafe_access, differing in |
3038 // that it always issues store-store ("release") barrier and ensures | 2781 // that it always issues store-store ("release") barrier and ensures |
3039 // store-atomicity (which only matters for "long"). | 2782 // store-atomicity (which only matters for "long"). |
3040 | 2783 |
3042 | 2785 |
3043 #ifndef PRODUCT | 2786 #ifndef PRODUCT |
3044 { | 2787 { |
3045 ResourceMark rm; | 2788 ResourceMark rm; |
3046 // Check the signatures. | 2789 // Check the signatures. |
3047 ciSignature* sig = signature(); | 2790 ciSignature* sig = callee()->signature(); |
3048 #ifdef ASSERT | 2791 #ifdef ASSERT |
3049 BasicType rtype = sig->return_type()->basic_type(); | 2792 BasicType rtype = sig->return_type()->basic_type(); |
3050 assert(rtype == T_VOID, "must return void"); | 2793 assert(rtype == T_VOID, "must return void"); |
3051 assert(sig->count() == 3, "has 3 arguments"); | 2794 assert(sig->count() == 3, "has 3 arguments"); |
3052 assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object"); | 2795 assert(sig->type_at(0)->basic_type() == T_OBJECT, "base is object"); |
3053 assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long"); | 2796 assert(sig->type_at(1)->basic_type() == T_LONG, "offset is long"); |
3054 #endif // ASSERT | 2797 #endif // ASSERT |
3055 } | 2798 } |
3056 #endif //PRODUCT | 2799 #endif //PRODUCT |
3057 | 2800 |
3058 // number of stack slots per value argument (1 or 2) | |
3059 int type_words = type2size[type]; | |
3060 | |
3061 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | 2801 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". |
3062 | 2802 |
3063 // Argument words: "this" plus oop plus offset plus value; | 2803 // Get arguments: |
3064 int nargs = 1 + 1 + 2 + type_words; | 2804 Node* receiver = argument(0); // type: oop |
3065 | 2805 Node* base = argument(1); // type: oop |
3066 // pop arguments: val, offset, base, and receiver | 2806 Node* offset = argument(2); // type: long |
3067 debug_only(int saved_sp = _sp); | 2807 Node* val = argument(4); // type: oop, int, or long |
3068 _sp += nargs; | 2808 |
3069 Node* val = (type_words == 1) ? pop() : pop_pair(); | 2809 // Null check receiver. |
3070 Node *offset = pop_pair(); | 2810 receiver = null_check(receiver); |
3071 Node *base = pop(); | |
3072 Node *receiver = pop(); | |
3073 assert(saved_sp == _sp, "must have correct argument count"); | |
3074 | |
3075 // Null check receiver. | |
3076 _sp += nargs; | |
3077 do_null_check(receiver, T_OBJECT); | |
3078 _sp -= nargs; | |
3079 if (stopped()) { | 2811 if (stopped()) { |
3080 return true; | 2812 return true; |
3081 } | 2813 } |
3082 | 2814 |
3083 // Build field offset expression. | 2815 // Build field offset expression. |
3090 Compile::AliasType* alias_type = C->alias_type(adr_type); | 2822 Compile::AliasType* alias_type = C->alias_type(adr_type); |
3091 | 2823 |
3092 insert_mem_bar(Op_MemBarRelease); | 2824 insert_mem_bar(Op_MemBarRelease); |
3093 insert_mem_bar(Op_MemBarCPUOrder); | 2825 insert_mem_bar(Op_MemBarCPUOrder); |
3094 // Ensure that the store is atomic for longs: | 2826 // Ensure that the store is atomic for longs: |
3095 bool require_atomic_access = true; | 2827 const bool require_atomic_access = true; |
3096 Node* store; | 2828 Node* store; |
3097 if (type == T_OBJECT) // reference stores need a store barrier. | 2829 if (type == T_OBJECT) // reference stores need a store barrier. |
3098 store = store_oop_to_unknown(control(), base, adr, adr_type, val, type); | 2830 store = store_oop_to_unknown(control(), base, adr, adr_type, val, type); |
3099 else { | 2831 else { |
3100 store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access); | 2832 store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access); |
3101 } | 2833 } |
3102 insert_mem_bar(Op_MemBarCPUOrder); | 2834 insert_mem_bar(Op_MemBarCPUOrder); |
3103 return true; | 2835 return true; |
3104 } | 2836 } |
3105 | 2837 |
2838 //----------------------------inline_unsafe_allocate--------------------------- | |
2839 // public native Object sun.misc.Unsafe.allocateInstance(Class<?> cls); | 
3106 bool LibraryCallKit::inline_unsafe_allocate() { | 2840 bool LibraryCallKit::inline_unsafe_allocate() { |
3107 if (callee()->is_static()) return false; // caller must have the capability! | 2841 if (callee()->is_static()) return false; // caller must have the capability! |
3108 int nargs = 1 + 1; | 2842 |
3109 assert(signature()->size() == nargs-1, "alloc has 1 argument"); | 2843 null_check_receiver(); // null-check, then ignore |
3110 null_check_receiver(callee()); // check then ignore argument(0) | 2844 Node* cls = null_check(argument(1)); |
3111 _sp += nargs; // set original stack for use by uncommon_trap | |
3112 Node* cls = do_null_check(argument(1), T_OBJECT); | |
3113 _sp -= nargs; | |
3114 if (stopped()) return true; | 2845 if (stopped()) return true; |
3115 | 2846 |
3116 Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0); | 2847 Node* kls = load_klass_from_mirror(cls, false, NULL, 0); |
3117 _sp += nargs; // set original stack for use by uncommon_trap | 2848 kls = null_check(kls); |
3118 kls = do_null_check(kls, T_OBJECT); | |
3119 _sp -= nargs; | |
3120 if (stopped()) return true; // argument was like int.class | 2849 if (stopped()) return true; // argument was like int.class |
3121 | 2850 |
3122 // Note: The argument might still be an illegal value like | 2851 // Note: The argument might still be an illegal value like |
3123 // Serializable.class or Object[].class. The runtime will handle it. | 2852 // Serializable.class or Object[].class. The runtime will handle it. |
3124 // But we must make an explicit check for initialization. | 2853 // But we must make an explicit check for initialization. |
3125 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset())); | 2854 Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset())); |
3126 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler | 2855 // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler |
3127 // can generate code to load it as unsigned byte. | 2856 // can generate code to load it as unsigned byte. |
3128 Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN); | 2857 Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN); |
3129 Node* bits = intcon(InstanceKlass::fully_initialized); | 2858 Node* bits = intcon(InstanceKlass::fully_initialized); |
3130 Node* test = _gvn.transform( new (C) SubINode(inst, bits) ); | 2859 Node* test = _gvn.transform(new (C) SubINode(inst, bits)); |
3131 // The 'test' is non-zero if we need to take a slow path. | 2860 // The 'test' is non-zero if we need to take a slow path. |
3132 | 2861 |
3133 Node* obj = new_instance(kls, test); | 2862 Node* obj = new_instance(kls, test); |
3134 push(obj); | 2863 set_result(obj); |
3135 | |
3136 return true; | 2864 return true; |
3137 } | 2865 } |
3138 | 2866 |
3139 #ifdef TRACE_HAVE_INTRINSICS | 2867 #ifdef TRACE_HAVE_INTRINSICS |
3140 /* | 2868 /* |
3141 * oop -> myklass | 2869 * oop -> myklass |
3142 * myklass->trace_id |= USED | 2870 * myklass->trace_id |= USED |
3143 * return myklass->trace_id & ~0x3 | 2871 * return myklass->trace_id & ~0x3 |
3144 */ | 2872 */ |
3145 bool LibraryCallKit::inline_native_classID() { | 2873 bool LibraryCallKit::inline_native_classID() { |
3146 int nargs = 1 + 1; | 2874 null_check_receiver(); // null-check, then ignore |
3147 null_check_receiver(callee()); // check then ignore argument(0) | 2875 Node* cls = null_check(argument(1), T_OBJECT); |
3148 _sp += nargs; | 2876 Node* kls = load_klass_from_mirror(cls, false, NULL, 0); |
3149 Node* cls = do_null_check(argument(1), T_OBJECT); | 2877 kls = null_check(kls, T_OBJECT); |
3150 _sp -= nargs; | |
3151 Node* kls = load_klass_from_mirror(cls, false, nargs, NULL, 0); | |
3152 _sp += nargs; | |
3153 kls = do_null_check(kls, T_OBJECT); | |
3154 _sp -= nargs; | |
3155 ByteSize offset = TRACE_ID_OFFSET; | 2878 ByteSize offset = TRACE_ID_OFFSET; |
3156 Node* insp = basic_plus_adr(kls, in_bytes(offset)); | 2879 Node* insp = basic_plus_adr(kls, in_bytes(offset)); |
3157 Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG); | 2880 Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG); |
3158 Node* bits = longcon(~0x03l); // ignore bit 0 & 1 | 2881 Node* bits = longcon(~0x03l); // ignore bit 0 & 1 |
3159 Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits)); | 2882 Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits)); |
3160 Node* clsused = longcon(0x01l); // set the class bit | 2883 Node* clsused = longcon(0x01l); // set the class bit |
3161 Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused)); | 2884 Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused)); |
3162 | 2885 |
3163 const TypePtr *adr_type = _gvn.type(insp)->isa_ptr(); | 2886 const TypePtr *adr_type = _gvn.type(insp)->isa_ptr(); |
3164 store_to_memory(control(), insp, orl, T_LONG, adr_type); | 2887 store_to_memory(control(), insp, orl, T_LONG, adr_type); |
3165 push_pair(andl); | 2888 set_result(andl); |
3166 return true; | 2889 return true; |
3167 } | 2890 } |
3168 | 2891 |
3169 bool LibraryCallKit::inline_native_threadID() { | 2892 bool LibraryCallKit::inline_native_threadID() { |
3170 Node* tls_ptr = NULL; | 2893 Node* tls_ptr = NULL; |
3175 | 2898 |
3176 Node* threadid = NULL; | 2899 Node* threadid = NULL; |
3177 size_t thread_id_size = OSThread::thread_id_size(); | 2900 size_t thread_id_size = OSThread::thread_id_size(); |
3178 if (thread_id_size == (size_t) BytesPerLong) { | 2901 if (thread_id_size == (size_t) BytesPerLong) { |
3179 threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG)); | 2902 threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG)); |
3180 push(threadid); | |
3181 } else if (thread_id_size == (size_t) BytesPerInt) { | 2903 } else if (thread_id_size == (size_t) BytesPerInt) { |
3182 threadid = make_load(control(), p, TypeInt::INT, T_INT); | 2904 threadid = make_load(control(), p, TypeInt::INT, T_INT); |
3183 push(threadid); | |
3184 } else { | 2905 } else { |
3185 ShouldNotReachHere(); | 2906 ShouldNotReachHere(); |
3186 } | 2907 } |
2908 set_result(threadid); | |
3187 return true; | 2909 return true; |
3188 } | 2910 } |
3189 #endif | 2911 #endif |
3190 | 2912 |
3191 //------------------------inline_native_time_funcs-------------- | 2913 //------------------------inline_native_time_funcs-------------- |
3192 // inline code for System.currentTimeMillis() and System.nanoTime() | 2914 // inline code for System.currentTimeMillis() and System.nanoTime() |
3193 // these have the same type and signature | 2915 // these have the same type and signature |
3194 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) { | 2916 bool LibraryCallKit::inline_native_time_funcs(address funcAddr, const char* funcName) { |
3195 const TypeFunc *tf = OptoRuntime::void_long_Type(); | 2917 const TypeFunc* tf = OptoRuntime::void_long_Type(); |
3196 const TypePtr* no_memory_effects = NULL; | 2918 const TypePtr* no_memory_effects = NULL; |
3197 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects); | 2919 Node* time = make_runtime_call(RC_LEAF, tf, funcAddr, funcName, no_memory_effects); |
3198 Node* value = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+0)); | 2920 Node* value = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+0)); |
3199 #ifdef ASSERT | 2921 #ifdef ASSERT |
3200 Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms + 1)); | 2922 Node* value_top = _gvn.transform(new (C) ProjNode(time, TypeFunc::Parms+1)); |
3201 assert(value_top == top(), "second value must be top"); | 2923 assert(value_top == top(), "second value must be top"); |
3202 #endif | 2924 #endif |
3203 push_pair(value); | 2925 set_result(value); |
3204 return true; | 2926 return true; |
3205 } | 2927 } |
3206 | 2928 |
3207 //------------------------inline_native_currentThread------------------ | 2929 //------------------------inline_native_currentThread------------------ |
3208 bool LibraryCallKit::inline_native_currentThread() { | 2930 bool LibraryCallKit::inline_native_currentThread() { |
3209 Node* junk = NULL; | 2931 Node* junk = NULL; |
3210 push(generate_current_thread(junk)); | 2932 set_result(generate_current_thread(junk)); |
3211 return true; | 2933 return true; |
3212 } | 2934 } |
3213 | 2935 |
3214 //------------------------inline_native_isInterrupted------------------ | 2936 //------------------------inline_native_isInterrupted------------------ |
2937 // private native boolean java.lang.Thread.isInterrupted(boolean ClearInterrupted); | |
3215 bool LibraryCallKit::inline_native_isInterrupted() { | 2938 bool LibraryCallKit::inline_native_isInterrupted() { |
3216 const int nargs = 1+1; // receiver + boolean | |
3217 assert(nargs == arg_size(), "sanity"); | |
3218 // Add a fast path to t.isInterrupted(clear_int): | 2939 // Add a fast path to t.isInterrupted(clear_int): |
3219 // (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int)) | 2940 // (t == Thread.current() && (!TLS._osthread._interrupted || !clear_int)) |
3220 // ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int) | 2941 // ? TLS._osthread._interrupted : /*slow path:*/ t.isInterrupted(clear_int) |
3221 // So, in the common case that the interrupt bit is false, | 2942 // So, in the common case that the interrupt bit is false, |
3222 // we avoid making a call into the VM. Even if the interrupt bit | 2943 // we avoid making a call into the VM. Even if the interrupt bit |
3310 | 3031 |
3311 set_all_memory( _gvn.transform(mem_phi) ); | 3032 set_all_memory( _gvn.transform(mem_phi) ); |
3312 set_i_o( _gvn.transform(io_phi) ); | 3033 set_i_o( _gvn.transform(io_phi) ); |
3313 } | 3034 } |
3314 | 3035 |
3315 push_result(result_rgn, result_val); | |
3316 C->set_has_split_ifs(true); // Has chance for split-if optimization | 3036 C->set_has_split_ifs(true); // Has chance for split-if optimization |
3317 | 3037 set_result(result_rgn, result_val); |
3318 return true; | 3038 return true; |
3319 } | 3039 } |
3320 | 3040 |
3321 //---------------------------load_mirror_from_klass---------------------------- | 3041 //---------------------------load_mirror_from_klass---------------------------- |
3322 // Given a klass oop, load its java mirror (a java.lang.Class oop). | 3042 // Given a klass oop, load its java mirror (a java.lang.Class oop). |
3332 // If never_see_null, take an uncommon trap on null, so we can optimistically | 3052 // If never_see_null, take an uncommon trap on null, so we can optimistically |
3333 // compile for the non-null case. | 3053 // compile for the non-null case. |
3334 // If the region is NULL, force never_see_null = true. | 3054 // If the region is NULL, force never_see_null = true. |
3335 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror, | 3055 Node* LibraryCallKit::load_klass_from_mirror_common(Node* mirror, |
3336 bool never_see_null, | 3056 bool never_see_null, |
3337 int nargs, | |
3338 RegionNode* region, | 3057 RegionNode* region, |
3339 int null_path, | 3058 int null_path, |
3340 int offset) { | 3059 int offset) { |
3341 if (region == NULL) never_see_null = true; | 3060 if (region == NULL) never_see_null = true; |
3342 Node* p = basic_plus_adr(mirror, offset); | 3061 Node* p = basic_plus_adr(mirror, offset); |
3343 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL; | 3062 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL; |
3344 Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) ); | 3063 Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) ); |
3345 _sp += nargs; // any deopt will start just before call to enclosing method | |
3346 Node* null_ctl = top(); | 3064 Node* null_ctl = top(); |
3347 kls = null_check_oop(kls, &null_ctl, never_see_null); | 3065 kls = null_check_oop(kls, &null_ctl, never_see_null); |
3348 if (region != NULL) { | 3066 if (region != NULL) { |
3349 // Set region->in(null_path) if the mirror is a primitive (e.g, int.class). | 3067 // Set region->in(null_path) if the mirror is a primitive (e.g, int.class). |
3350 region->init_req(null_path, null_ctl); | 3068 region->init_req(null_path, null_ctl); |
3351 } else { | 3069 } else { |
3352 assert(null_ctl == top(), "no loose ends"); | 3070 assert(null_ctl == top(), "no loose ends"); |
3353 } | 3071 } |
3354 _sp -= nargs; | |
3355 return kls; | 3072 return kls; |
3356 } | 3073 } |
3357 | 3074 |
3358 //--------------------(inline_native_Class_query helpers)--------------------- | 3075 //--------------------(inline_native_Class_query helpers)--------------------- |
3359 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER. | 3076 // Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE, JVM_ACC_HAS_FINALIZER. |
3374 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region); | 3091 return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region); |
3375 } | 3092 } |
3376 | 3093 |
3377 //-------------------------inline_native_Class_query------------------- | 3094 //-------------------------inline_native_Class_query------------------- |
3378 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { | 3095 bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { |
3379 int nargs = 1+0; // just the Class mirror, in most cases | |
3380 const Type* return_type = TypeInt::BOOL; | 3096 const Type* return_type = TypeInt::BOOL; |
3381 Node* prim_return_value = top(); // what happens if it's a primitive class? | 3097 Node* prim_return_value = top(); // what happens if it's a primitive class? |
3382 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); | 3098 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); |
3383 bool expect_prim = false; // most of these guys expect to work on refs | 3099 bool expect_prim = false; // most of these guys expect to work on refs |
3384 | 3100 |
3385 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT }; | 3101 enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT }; |
3386 | 3102 |
3103 Node* mirror = argument(0); | |
3104 Node* obj = top(); | |
3105 | |
3387 switch (id) { | 3106 switch (id) { |
3388 case vmIntrinsics::_isInstance: | 3107 case vmIntrinsics::_isInstance: |
3389 nargs = 1+1; // the Class mirror, plus the object getting queried about | |
3390 // nothing is an instance of a primitive type | 3108 // nothing is an instance of a primitive type |
3391 prim_return_value = intcon(0); | 3109 prim_return_value = intcon(0); |
3110 obj = argument(1); | |
3392 break; | 3111 break; |
3393 case vmIntrinsics::_getModifiers: | 3112 case vmIntrinsics::_getModifiers: |
3394 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC); | 3113 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC); |
3395 assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line"); | 3114 assert(is_power_of_2((int)JVM_ACC_WRITTEN_FLAGS+1), "change next line"); |
3396 return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin); | 3115 return_type = TypeInt::make(0, JVM_ACC_WRITTEN_FLAGS, Type::WidenMin); |
3417 case vmIntrinsics::_getClassAccessFlags: | 3136 case vmIntrinsics::_getClassAccessFlags: |
3418 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC); | 3137 prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC); |
3419 return_type = TypeInt::INT; // not bool! 6297094 | 3138 return_type = TypeInt::INT; // not bool! 6297094 |
3420 break; | 3139 break; |
3421 default: | 3140 default: |
3422 ShouldNotReachHere(); | 3141 fatal_unexpected_iid(id); |
3423 } | 3142 break; |
3424 | 3143 } |
3425 Node* mirror = argument(0); | |
3426 Node* obj = (nargs <= 1)? top(): argument(1); | |
3427 | 3144 |
3428 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr(); | 3145 const TypeInstPtr* mirror_con = _gvn.type(mirror)->isa_instptr(); |
3429 if (mirror_con == NULL) return false; // cannot happen? | 3146 if (mirror_con == NULL) return false; // cannot happen? |
3430 | 3147 |
3431 #ifndef PRODUCT | 3148 #ifndef PRODUCT |
3449 // if it is. See bug 4774291. | 3166 // if it is. See bug 4774291. |
3450 | 3167 |
3451 // For Reflection.getClassAccessFlags(), the null check occurs in | 3168 // For Reflection.getClassAccessFlags(), the null check occurs in |
3452 // the wrong place; see inline_unsafe_access(), above, for a similar | 3169 // the wrong place; see inline_unsafe_access(), above, for a similar |
3453 // situation. | 3170 // situation. |
3454 _sp += nargs; // set original stack for use by uncommon_trap | 3171 mirror = null_check(mirror); |
3455 mirror = do_null_check(mirror, T_OBJECT); | |
3456 _sp -= nargs; | |
3457 // If mirror or obj is dead, only null-path is taken. | 3172 // If mirror or obj is dead, only null-path is taken. |
3458 if (stopped()) return true; | 3173 if (stopped()) return true; |
3459 | 3174 |
3460 if (expect_prim) never_see_null = false; // expect nulls (meaning prims) | 3175 if (expect_prim) never_see_null = false; // expect nulls (meaning prims) |
3461 | 3176 |
3462 // Now load the mirror's klass metaobject, and null-check it. | 3177 // Now load the mirror's klass metaobject, and null-check it. |
3463 // Side-effects region with the control path if the klass is null. | 3178 // Side-effects region with the control path if the klass is null. |
3464 Node* kls = load_klass_from_mirror(mirror, never_see_null, nargs, | 3179 Node* kls = load_klass_from_mirror(mirror, never_see_null, region, _prim_path); |
3465 region, _prim_path); | |
3466 // If kls is null, we have a primitive mirror. | 3180 // If kls is null, we have a primitive mirror. |
3467 phi->init_req(_prim_path, prim_return_value); | 3181 phi->init_req(_prim_path, prim_return_value); |
3468 if (stopped()) { push_result(region, phi); return true; } | 3182 if (stopped()) { set_result(region, phi); return true; } |
3469 | 3183 |
3470 Node* p; // handy temp | 3184 Node* p; // handy temp |
3471 Node* null_ctl; | 3185 Node* null_ctl; |
3472 | 3186 |
3473 // Now that we have the non-null klass, we can perform the real query. | 3187 // Now that we have the non-null klass, we can perform the real query. |
3474 // For constant classes, the query will constant-fold in LoadNode::Value. | 3188 // For constant classes, the query will constant-fold in LoadNode::Value. |
3475 Node* query_value = top(); | 3189 Node* query_value = top(); |
3476 switch (id) { | 3190 switch (id) { |
3477 case vmIntrinsics::_isInstance: | 3191 case vmIntrinsics::_isInstance: |
3478 // nothing is an instance of a primitive type | 3192 // nothing is an instance of a primitive type |
3479 _sp += nargs; // gen_instanceof might do an uncommon trap | |
3480 query_value = gen_instanceof(obj, kls); | 3193 query_value = gen_instanceof(obj, kls); |
3481 _sp -= nargs; | |
3482 break; | 3194 break; |
3483 | 3195 |
3484 case vmIntrinsics::_getModifiers: | 3196 case vmIntrinsics::_getModifiers: |
3485 p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset())); | 3197 p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset())); |
3486 query_value = make_load(NULL, p, TypeInt::INT, T_INT); | 3198 query_value = make_load(NULL, p, TypeInt::INT, T_INT); |
3551 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset())); | 3263 p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset())); |
3552 query_value = make_load(NULL, p, TypeInt::INT, T_INT); | 3264 query_value = make_load(NULL, p, TypeInt::INT, T_INT); |
3553 break; | 3265 break; |
3554 | 3266 |
3555 default: | 3267 default: |
3556 ShouldNotReachHere(); | 3268 fatal_unexpected_iid(id); |
3269 break; | |
3557 } | 3270 } |
3558 | 3271 |
3559 // Fall-through is the normal case of a query to a real class. | 3272 // Fall-through is the normal case of a query to a real class. |
3560 phi->init_req(1, query_value); | 3273 phi->init_req(1, query_value); |
3561 region->init_req(1, control()); | 3274 region->init_req(1, control()); |
3562 | 3275 |
3563 push_result(region, phi); | |
3564 C->set_has_split_ifs(true); // Has chance for split-if optimization | 3276 C->set_has_split_ifs(true); // Has chance for split-if optimization |
3565 | 3277 set_result(region, phi); |
3566 return true; | 3278 return true; |
3567 } | 3279 } |
3568 | 3280 |
3569 //--------------------------inline_native_subtype_check------------------------ | 3281 //--------------------------inline_native_subtype_check------------------------ |
3570 // This intrinsic takes the JNI calls out of the heart of | 3282 // This intrinsic takes the JNI calls out of the heart of |
3571 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc. | 3283 // UnsafeFieldAccessorImpl.set, which improves Field.set, readObject, etc. |
3572 bool LibraryCallKit::inline_native_subtype_check() { | 3284 bool LibraryCallKit::inline_native_subtype_check() { |
3573 int nargs = 1+1; // the Class mirror, plus the other class getting examined | |
3574 | |
3575 // Pull both arguments off the stack. | 3285 // Pull both arguments off the stack. |
3576 Node* args[2]; // two java.lang.Class mirrors: superc, subc | 3286 Node* args[2]; // two java.lang.Class mirrors: superc, subc |
3577 args[0] = argument(0); | 3287 args[0] = argument(0); |
3578 args[1] = argument(1); | 3288 args[1] = argument(1); |
3579 Node* klasses[2]; // corresponding Klasses: superk, subk | 3289 Node* klasses[2]; // corresponding Klasses: superk, subk |
3600 | 3310 |
3601 // First null-check both mirrors and load each mirror's klass metaobject. | 3311 // First null-check both mirrors and load each mirror's klass metaobject. |
3602 int which_arg; | 3312 int which_arg; |
3603 for (which_arg = 0; which_arg <= 1; which_arg++) { | 3313 for (which_arg = 0; which_arg <= 1; which_arg++) { |
3604 Node* arg = args[which_arg]; | 3314 Node* arg = args[which_arg]; |
3605 _sp += nargs; // set original stack for use by uncommon_trap | 3315 arg = null_check(arg); |
3606 arg = do_null_check(arg, T_OBJECT); | |
3607 _sp -= nargs; | |
3608 if (stopped()) break; | 3316 if (stopped()) break; |
3609 args[which_arg] = _gvn.transform(arg); | 3317 args[which_arg] = _gvn.transform(arg); |
3610 | 3318 |
3611 Node* p = basic_plus_adr(arg, class_klass_offset); | 3319 Node* p = basic_plus_adr(arg, class_klass_offset); |
3612 Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type); | 3320 Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type); |
3616 // Having loaded both klasses, test each for null. | 3324 // Having loaded both klasses, test each for null. |
3617 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); | 3325 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); |
3618 for (which_arg = 0; which_arg <= 1; which_arg++) { | 3326 for (which_arg = 0; which_arg <= 1; which_arg++) { |
3619 Node* kls = klasses[which_arg]; | 3327 Node* kls = klasses[which_arg]; |
3620 Node* null_ctl = top(); | 3328 Node* null_ctl = top(); |
3621 _sp += nargs; // set original stack for use by uncommon_trap | |
3622 kls = null_check_oop(kls, &null_ctl, never_see_null); | 3329 kls = null_check_oop(kls, &null_ctl, never_see_null); |
3623 _sp -= nargs; | |
3624 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path); | 3330 int prim_path = (which_arg == 0 ? _prim_0_path : _prim_1_path); |
3625 region->init_req(prim_path, null_ctl); | 3331 region->init_req(prim_path, null_ctl); |
3626 if (stopped()) break; | 3332 if (stopped()) break; |
3627 klasses[which_arg] = kls; | 3333 klasses[which_arg] = kls; |
3628 } | 3334 } |
3668 phi->set_req(i, intcon(0)); // all other paths produce 'false' | 3374 phi->set_req(i, intcon(0)); // all other paths produce 'false' |
3669 } | 3375 } |
3670 } | 3376 } |
3671 | 3377 |
3672 set_control(_gvn.transform(region)); | 3378 set_control(_gvn.transform(region)); |
3673 push(_gvn.transform(phi)); | 3379 set_result(_gvn.transform(phi)); |
3674 | |
3675 return true; | 3380 return true; |
3676 } | 3381 } |
3677 | 3382 |
3678 //---------------------generate_array_guard_common------------------------ | 3383 //---------------------generate_array_guard_common------------------------ |
3679 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, | 3384 Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, |
3717 return generate_fair_guard(bol, region); | 3422 return generate_fair_guard(bol, region); |
3718 } | 3423 } |
3719 | 3424 |
3720 | 3425 |
3721 //-----------------------inline_native_newArray-------------------------- | 3426 //-----------------------inline_native_newArray-------------------------- |
3427 // private static native Object java.lang.reflect.newArray(Class<?> componentType, int length); | |
3722 bool LibraryCallKit::inline_native_newArray() { | 3428 bool LibraryCallKit::inline_native_newArray() { |
3723 int nargs = 2; | |
3724 Node* mirror = argument(0); | 3429 Node* mirror = argument(0); |
3725 Node* count_val = argument(1); | 3430 Node* count_val = argument(1); |
3726 | 3431 |
3727 _sp += nargs; // set original stack for use by uncommon_trap | 3432 mirror = null_check(mirror); |
3728 mirror = do_null_check(mirror, T_OBJECT); | |
3729 _sp -= nargs; | |
3730 // If mirror or obj is dead, only null-path is taken. | 3433 // If mirror or obj is dead, only null-path is taken. |
3731 if (stopped()) return true; | 3434 if (stopped()) return true; |
3732 | 3435 |
3733 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT }; | 3436 enum { _normal_path = 1, _slow_path = 2, PATH_LIMIT }; |
3734 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT); | 3437 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT); |
3738 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY, | 3441 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY, |
3739 TypePtr::BOTTOM); | 3442 TypePtr::BOTTOM); |
3740 | 3443 |
3741 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); | 3444 bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check); |
3742 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null, | 3445 Node* klass_node = load_array_klass_from_mirror(mirror, never_see_null, |
3743 nargs, | |
3744 result_reg, _slow_path); | 3446 result_reg, _slow_path); |
3745 Node* normal_ctl = control(); | 3447 Node* normal_ctl = control(); |
3746 Node* no_array_ctl = result_reg->in(_slow_path); | 3448 Node* no_array_ctl = result_reg->in(_slow_path); |
3747 | 3449 |
3748 // Generate code for the slow case. We make a call to newArray(). | 3450 // Generate code for the slow case. We make a call to newArray(). |
3765 set_control(normal_ctl); | 3467 set_control(normal_ctl); |
3766 if (!stopped()) { | 3468 if (!stopped()) { |
3767 // Normal case: The array type has been cached in the java.lang.Class. | 3469 // Normal case: The array type has been cached in the java.lang.Class. |
3768 // The following call works fine even if the array type is polymorphic. | 3470 // The following call works fine even if the array type is polymorphic. |
3769 // It could be a dynamic mix of int[], boolean[], Object[], etc. | 3471 // It could be a dynamic mix of int[], boolean[], Object[], etc. |
3770 Node* obj = new_array(klass_node, count_val, nargs); | 3472 Node* obj = new_array(klass_node, count_val, 0); // no arguments to push |
3771 result_reg->init_req(_normal_path, control()); | 3473 result_reg->init_req(_normal_path, control()); |
3772 result_val->init_req(_normal_path, obj); | 3474 result_val->init_req(_normal_path, obj); |
3773 result_io ->init_req(_normal_path, i_o()); | 3475 result_io ->init_req(_normal_path, i_o()); |
3774 result_mem->init_req(_normal_path, reset_memory()); | 3476 result_mem->init_req(_normal_path, reset_memory()); |
3775 } | 3477 } |
3776 | 3478 |
3777 // Return the combined state. | 3479 // Return the combined state. |
3778 set_i_o( _gvn.transform(result_io) ); | 3480 set_i_o( _gvn.transform(result_io) ); |
3779 set_all_memory( _gvn.transform(result_mem) ); | 3481 set_all_memory( _gvn.transform(result_mem) ); |
3780 push_result(result_reg, result_val); | 3482 |
3781 C->set_has_split_ifs(true); // Has chance for split-if optimization | 3483 C->set_has_split_ifs(true); // Has chance for split-if optimization |
3782 | 3484 set_result(result_reg, result_val); |
3783 return true; | 3485 return true; |
3784 } | 3486 } |
3785 | 3487 |
3786 //----------------------inline_native_getLength-------------------------- | 3488 //----------------------inline_native_getLength-------------------------- |
3489 // public static native int java.lang.reflect.Array.getLength(Object array); | |
3787 bool LibraryCallKit::inline_native_getLength() { | 3490 bool LibraryCallKit::inline_native_getLength() { |
3788 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; | 3491 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; |
3789 | 3492 |
3790 int nargs = 1; | 3493 Node* array = null_check(argument(0)); |
3791 Node* array = argument(0); | |
3792 | |
3793 _sp += nargs; // set original stack for use by uncommon_trap | |
3794 array = do_null_check(array, T_OBJECT); | |
3795 _sp -= nargs; | |
3796 | |
3797 // If array is dead, only null-path is taken. | 3494 // If array is dead, only null-path is taken. |
3798 if (stopped()) return true; | 3495 if (stopped()) return true; |
3799 | 3496 |
3800 // Deoptimize if it is a non-array. | 3497 // Deoptimize if it is a non-array. |
3801 Node* non_array = generate_non_array_guard(load_object_klass(array), NULL); | 3498 Node* non_array = generate_non_array_guard(load_object_klass(array), NULL); |
3802 | 3499 |
3803 if (non_array != NULL) { | 3500 if (non_array != NULL) { |
3804 PreserveJVMState pjvms(this); | 3501 PreserveJVMState pjvms(this); |
3805 set_control(non_array); | 3502 set_control(non_array); |
3806 _sp += nargs; // push the arguments back on the stack | |
3807 uncommon_trap(Deoptimization::Reason_intrinsic, | 3503 uncommon_trap(Deoptimization::Reason_intrinsic, |
3808 Deoptimization::Action_maybe_recompile); | 3504 Deoptimization::Action_maybe_recompile); |
3809 } | 3505 } |
3810 | 3506 |
3811 // If control is dead, only non-array-path is taken. | 3507 // If control is dead, only non-array-path is taken. |
3812 if (stopped()) return true; | 3508 if (stopped()) return true; |
3813 | 3509 |
3814 // The works fine even if the array type is polymorphic. | 3510 // The works fine even if the array type is polymorphic. |
3815 // It could be a dynamic mix of int[], boolean[], Object[], etc. | 3511 // It could be a dynamic mix of int[], boolean[], Object[], etc. |
3816 push( load_array_length(array) ); | 3512 Node* result = load_array_length(array); |
3817 | 3513 |
3818 C->set_has_split_ifs(true); // Has chance for split-if optimization | 3514 C->set_has_split_ifs(true); // Has chance for split-if optimization |
3819 | 3515 set_result(result); |
3820 return true; | 3516 return true; |
3821 } | 3517 } |
3822 | 3518 |
3823 //------------------------inline_array_copyOf---------------------------- | 3519 //------------------------inline_array_copyOf---------------------------- |
3520 // public static <T,U> T[] java.util.Arrays.copyOf( U[] original, int newLength, Class<? extends T[]> newType); | |
3521 // public static <T,U> T[] java.util.Arrays.copyOfRange(U[] original, int from, int to, Class<? extends T[]> newType); | |
3824 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) { | 3522 bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) { |
3523 return false; | |
3825 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; | 3524 if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; |
3826 | 3525 |
3827 // Restore the stack and pop off the arguments. | 3526 // Get the arguments. |
3828 int nargs = 3 + (is_copyOfRange? 1: 0); | |
3829 Node* original = argument(0); | 3527 Node* original = argument(0); |
3830 Node* start = is_copyOfRange? argument(1): intcon(0); | 3528 Node* start = is_copyOfRange? argument(1): intcon(0); |
3831 Node* end = is_copyOfRange? argument(2): argument(1); | 3529 Node* end = is_copyOfRange? argument(2): argument(1); |
3832 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2); | 3530 Node* array_type_mirror = is_copyOfRange? argument(3): argument(2); |
3833 | 3531 |
3834 Node* newcopy; | 3532 Node* newcopy; |
3835 | 3533 |
3836 //set the original stack and the reexecute bit for the interpreter to reexecute | 3534 // Set the original stack and the reexecute bit for the interpreter to reexecute |
3837 //the bytecode that invokes Arrays.copyOf if deoptimization happens | 3535 // the bytecode that invokes Arrays.copyOf if deoptimization happens. |
3838 { PreserveReexecuteState preexecs(this); | 3536 { PreserveReexecuteState preexecs(this); |
3839 _sp += nargs; | |
3840 jvms()->set_should_reexecute(true); | 3537 jvms()->set_should_reexecute(true); |
3841 | 3538 |
3842 array_type_mirror = do_null_check(array_type_mirror, T_OBJECT); | 3539 array_type_mirror = null_check(array_type_mirror); |
3843 original = do_null_check(original, T_OBJECT); | 3540 original = null_check(original); |
3844 | 3541 |
3845 // Check if a null path was taken unconditionally. | 3542 // Check if a null path was taken unconditionally. |
3846 if (stopped()) return true; | 3543 if (stopped()) return true; |
3847 | 3544 |
3848 Node* orig_length = load_array_length(original); | 3545 Node* orig_length = load_array_length(original); |
3849 | 3546 |
3850 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, 0, | 3547 Node* klass_node = load_klass_from_mirror(array_type_mirror, false, NULL, 0); |
3851 NULL, 0); | 3548 klass_node = null_check(klass_node); |
3852 klass_node = do_null_check(klass_node, T_OBJECT); | |
3853 | 3549 |
3854 RegionNode* bailout = new (C) RegionNode(1); | 3550 RegionNode* bailout = new (C) RegionNode(1); |
3855 record_for_igvn(bailout); | 3551 record_for_igvn(bailout); |
3856 | 3552 |
3857 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc. | 3553 // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc. |
3870 generate_negative_guard(start, bailout, &start); | 3566 generate_negative_guard(start, bailout, &start); |
3871 generate_negative_guard(end, bailout, &end); | 3567 generate_negative_guard(end, bailout, &end); |
3872 | 3568 |
3873 Node* length = end; | 3569 Node* length = end; |
3874 if (_gvn.type(start) != TypeInt::ZERO) { | 3570 if (_gvn.type(start) != TypeInt::ZERO) { |
3875 length = _gvn.transform( new (C) SubINode(end, start) ); | 3571 length = _gvn.transform(new (C) SubINode(end, start)); |
3876 } | 3572 } |
3877 | 3573 |
3878 // Bail out if length is negative. | 3574 // Bail out if length is negative. |
3879 // Without this the new_array would throw | 3575 // Without this the new_array would throw |
3880 // NegativeArraySizeException but IllegalArgumentException is what | 3576 // NegativeArraySizeException but IllegalArgumentException is what |
3881 // should be thrown | 3577 // should be thrown |
3882 generate_negative_guard(length, bailout, &length); | 3578 generate_negative_guard(length, bailout, &length); |
3883 | 3579 |
3884 if (bailout->req() > 1) { | 3580 if (bailout->req() > 1) { |
3885 PreserveJVMState pjvms(this); | 3581 PreserveJVMState pjvms(this); |
3886 set_control( _gvn.transform(bailout) ); | 3582 set_control(_gvn.transform(bailout)); |
3887 uncommon_trap(Deoptimization::Reason_intrinsic, | 3583 uncommon_trap(Deoptimization::Reason_intrinsic, |
3888 Deoptimization::Action_maybe_recompile); | 3584 Deoptimization::Action_maybe_recompile); |
3889 } | 3585 } |
3890 | 3586 |
3891 if (!stopped()) { | 3587 if (!stopped()) { |
3892 | |
3893 // How many elements will we copy from the original? | 3588 // How many elements will we copy from the original? |
3894 // The answer is MinI(orig_length - start, length). | 3589 // The answer is MinI(orig_length - start, length). |
3895 Node* orig_tail = _gvn.transform( new(C) SubINode(orig_length, start) ); | 3590 Node* orig_tail = _gvn.transform(new (C) SubINode(orig_length, start)); |
3896 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); | 3591 Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); |
3897 | 3592 |
3898 newcopy = new_array(klass_node, length, 0); | 3593 newcopy = new_array(klass_node, length, 0); // no argments to push |
3899 | 3594 |
3900 // Generate a direct call to the right arraycopy function(s). | 3595 // Generate a direct call to the right arraycopy function(s). |
3901 // We know the copy is disjoint but we might not know if the | 3596 // We know the copy is disjoint but we might not know if the |
3902 // oop stores need checking. | 3597 // oop stores need checking. |
3903 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class). | 3598 // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class). |
3908 bool length_never_negative = !is_copyOfRange; | 3603 bool length_never_negative = !is_copyOfRange; |
3909 generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT, | 3604 generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT, |
3910 original, start, newcopy, intcon(0), moved, | 3605 original, start, newcopy, intcon(0), moved, |
3911 disjoint_bases, length_never_negative); | 3606 disjoint_bases, length_never_negative); |
3912 } | 3607 } |
3913 } //original reexecute and sp are set back here | 3608 } // original reexecute is set back here |
3914 | |
3915 if(!stopped()) { | |
3916 push(newcopy); | |
3917 } | |
3918 | 3609 |
3919 C->set_has_split_ifs(true); // Has chance for split-if optimization | 3610 C->set_has_split_ifs(true); // Has chance for split-if optimization |
3920 | 3611 if (!stopped()) { |
3612 set_result(newcopy); | |
3613 } | |
3921 return true; | 3614 return true; |
3922 } | 3615 } |
3923 | 3616 |
3924 | 3617 |
3925 //----------------------generate_virtual_guard--------------------------- | 3618 //----------------------generate_virtual_guard--------------------------- |
3967 assert(!is_virtual, ""); | 3660 assert(!is_virtual, ""); |
3968 slow_call = new(C) CallStaticJavaNode(tf, | 3661 slow_call = new(C) CallStaticJavaNode(tf, |
3969 SharedRuntime::get_resolve_static_call_stub(), | 3662 SharedRuntime::get_resolve_static_call_stub(), |
3970 method, bci()); | 3663 method, bci()); |
3971 } else if (is_virtual) { | 3664 } else if (is_virtual) { |
3972 null_check_receiver(method); | 3665 null_check_receiver(); |
3973 int vtable_index = Method::invalid_vtable_index; | 3666 int vtable_index = Method::invalid_vtable_index; |
3974 if (UseInlineCaches) { | 3667 if (UseInlineCaches) { |
3975 // Suppress the vtable call | 3668 // Suppress the vtable call |
3976 } else { | 3669 } else { |
3977 // hashCode and clone are not a miranda methods, | 3670 // hashCode and clone are not a miranda methods, |
3981 } | 3674 } |
3982 slow_call = new(C) CallDynamicJavaNode(tf, | 3675 slow_call = new(C) CallDynamicJavaNode(tf, |
3983 SharedRuntime::get_resolve_virtual_call_stub(), | 3676 SharedRuntime::get_resolve_virtual_call_stub(), |
3984 method, vtable_index, bci()); | 3677 method, vtable_index, bci()); |
3985 } else { // neither virtual nor static: opt_virtual | 3678 } else { // neither virtual nor static: opt_virtual |
3986 null_check_receiver(method); | 3679 null_check_receiver(); |
3987 slow_call = new(C) CallStaticJavaNode(tf, | 3680 slow_call = new(C) CallStaticJavaNode(tf, |
3988 SharedRuntime::get_resolve_opt_virtual_call_stub(), | 3681 SharedRuntime::get_resolve_opt_virtual_call_stub(), |
3989 method, bci()); | 3682 method, bci()); |
3990 slow_call->set_optimized_virtual(true); | 3683 slow_call->set_optimized_virtual(true); |
3991 } | 3684 } |
4010 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY, | 3703 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY, |
4011 TypePtr::BOTTOM); | 3704 TypePtr::BOTTOM); |
4012 Node* obj = NULL; | 3705 Node* obj = NULL; |
4013 if (!is_static) { | 3706 if (!is_static) { |
4014 // Check for hashing null object | 3707 // Check for hashing null object |
4015 obj = null_check_receiver(callee()); | 3708 obj = null_check_receiver(); |
4016 if (stopped()) return true; // unconditionally null | 3709 if (stopped()) return true; // unconditionally null |
4017 result_reg->init_req(_null_path, top()); | 3710 result_reg->init_req(_null_path, top()); |
4018 result_val->init_req(_null_path, top()); | 3711 result_val->init_req(_null_path, top()); |
4019 } else { | 3712 } else { |
4020 // Do a null check, and return zero if null. | 3713 // Do a null check, and return zero if null. |
4026 result_val->init_req(_null_path, _gvn.intcon(0)); | 3719 result_val->init_req(_null_path, _gvn.intcon(0)); |
4027 } | 3720 } |
4028 | 3721 |
4029 // Unconditionally null? Then return right away. | 3722 // Unconditionally null? Then return right away. |
4030 if (stopped()) { | 3723 if (stopped()) { |
4031 set_control( result_reg->in(_null_path) ); | 3724 set_control( result_reg->in(_null_path)); |
4032 if (!stopped()) | 3725 if (!stopped()) |
4033 push( result_val ->in(_null_path) ); | 3726 set_result(result_val->in(_null_path)); |
4034 return true; | 3727 return true; |
4035 } | 3728 } |
4036 | 3729 |
4037 // After null check, get the object's klass. | 3730 // After null check, get the object's klass. |
4038 Node* obj_klass = load_object_klass(obj); | 3731 Node* obj_klass = load_object_klass(obj); |
4101 // Generate code for the slow case. We make a call to hashCode(). | 3794 // Generate code for the slow case. We make a call to hashCode(). |
4102 set_control(_gvn.transform(slow_region)); | 3795 set_control(_gvn.transform(slow_region)); |
4103 if (!stopped()) { | 3796 if (!stopped()) { |
4104 // No need for PreserveJVMState, because we're using up the present state. | 3797 // No need for PreserveJVMState, because we're using up the present state. |
4105 set_all_memory(init_mem); | 3798 set_all_memory(init_mem); |
4106 vmIntrinsics::ID hashCode_id = vmIntrinsics::_hashCode; | 3799 vmIntrinsics::ID hashCode_id = is_static ? vmIntrinsics::_identityHashCode : vmIntrinsics::_hashCode; |
4107 if (is_static) hashCode_id = vmIntrinsics::_identityHashCode; | |
4108 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static); | 3800 CallJavaNode* slow_call = generate_method_call(hashCode_id, is_virtual, is_static); |
4109 Node* slow_result = set_results_for_java_call(slow_call); | 3801 Node* slow_result = set_results_for_java_call(slow_call); |
4110 // this->control() comes from set_results_for_java_call | 3802 // this->control() comes from set_results_for_java_call |
4111 result_reg->init_req(_slow_path, control()); | 3803 result_reg->init_req(_slow_path, control()); |
4112 result_val->init_req(_slow_path, slow_result); | 3804 result_val->init_req(_slow_path, slow_result); |
4115 } | 3807 } |
4116 | 3808 |
4117 // Return the combined state. | 3809 // Return the combined state. |
4118 set_i_o( _gvn.transform(result_io) ); | 3810 set_i_o( _gvn.transform(result_io) ); |
4119 set_all_memory( _gvn.transform(result_mem) ); | 3811 set_all_memory( _gvn.transform(result_mem) ); |
4120 push_result(result_reg, result_val); | 3812 |
4121 | 3813 set_result(result_reg, result_val); |
4122 return true; | 3814 return true; |
4123 } | 3815 } |
4124 | 3816 |
4125 //---------------------------inline_native_getClass---------------------------- | 3817 //---------------------------inline_native_getClass---------------------------- |
3818 // public final native Class<?> java.lang.Object.getClass(); | |
3819 // | |
4126 // Build special case code for calls to getClass on an object. | 3820 // Build special case code for calls to getClass on an object. |
4127 bool LibraryCallKit::inline_native_getClass() { | 3821 bool LibraryCallKit::inline_native_getClass() { |
4128 Node* obj = null_check_receiver(callee()); | 3822 Node* obj = null_check_receiver(); |
4129 if (stopped()) return true; | 3823 if (stopped()) return true; |
4130 push( load_mirror_from_klass(load_object_klass(obj)) ); | 3824 set_result(load_mirror_from_klass(load_object_klass(obj))); |
4131 return true; | 3825 return true; |
4132 } | 3826 } |
4133 | 3827 |
4134 //-----------------inline_native_Reflection_getCallerClass--------------------- | 3828 //-----------------inline_native_Reflection_getCallerClass--------------------- |
3829 // public static native Class<?> sun.reflect.Reflection.getCallerClass(int realFramesToSkip); | |
3830 // | |
4135 // In the presence of deep enough inlining, getCallerClass() becomes a no-op. | 3831 // In the presence of deep enough inlining, getCallerClass() becomes a no-op. |
4136 // | 3832 // |
4137 // NOTE that this code must perform the same logic as | 3833 // NOTE that this code must perform the same logic as |
4138 // vframeStream::security_get_caller_frame in that it must skip | 3834 // vframeStream::security_get_caller_frame in that it must skip |
4139 // Method.invoke() and auxiliary frames. | 3835 // Method.invoke() and auxiliary frames. |
4140 | |
4141 | |
4142 | |
4143 | |
4144 bool LibraryCallKit::inline_native_Reflection_getCallerClass() { | 3836 bool LibraryCallKit::inline_native_Reflection_getCallerClass() { |
4145 ciMethod* method = callee(); | |
4146 | |
4147 #ifndef PRODUCT | 3837 #ifndef PRODUCT |
4148 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | 3838 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { |
4149 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass"); | 3839 tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass"); |
4150 } | 3840 } |
4151 #endif | 3841 #endif |
4152 | 3842 |
4153 debug_only(int saved_sp = _sp); | 3843 Node* caller_depth_node = argument(0); |
4154 | |
4155 // Argument words: (int depth) | |
4156 int nargs = 1; | |
4157 | |
4158 _sp += nargs; | |
4159 Node* caller_depth_node = pop(); | |
4160 | |
4161 assert(saved_sp == _sp, "must have correct argument count"); | |
4162 | 3844 |
4163 // The depth value must be a constant in order for the runtime call | 3845 // The depth value must be a constant in order for the runtime call |
4164 // to be eliminated. | 3846 // to be eliminated. |
4165 const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int(); | 3847 const TypeInt* caller_depth_type = _gvn.type(caller_depth_node)->isa_int(); |
4166 if (caller_depth_type == NULL || !caller_depth_type->is_con()) { | 3848 if (caller_depth_type == NULL || !caller_depth_type->is_con()) { |
4228 #ifndef PRODUCT | 3910 #ifndef PRODUCT |
4229 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | 3911 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { |
4230 tty->print_cr(" Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth); | 3912 tty->print_cr(" Bailing out because caller depth (%d) exceeded inlining depth (%d)", caller_depth_type->get_con(), _depth); |
4231 tty->print_cr(" JVM state at this point:"); | 3913 tty->print_cr(" JVM state at this point:"); |
4232 for (int i = _depth; i >= 1; i--) { | 3914 for (int i = _depth; i >= 1; i--) { |
4233 tty->print_cr(" %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8()); | 3915 ciMethod* m = jvms()->of_depth(i)->method(); |
3916 tty->print_cr(" %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8()); | |
4234 } | 3917 } |
4235 } | 3918 } |
4236 #endif | 3919 #endif |
4237 return false; // Reached end of inlining | 3920 return false; // Reached end of inlining |
4238 } | 3921 } |
4239 | 3922 |
4240 // Acquire method holder as java.lang.Class | 3923 // Acquire method holder as java.lang.Class |
4241 ciInstanceKlass* caller_klass = caller_jvms->method()->holder(); | 3924 ciInstanceKlass* caller_klass = caller_jvms->method()->holder(); |
4242 ciInstance* caller_mirror = caller_klass->java_mirror(); | 3925 ciInstance* caller_mirror = caller_klass->java_mirror(); |
3926 | |
4243 // Push this as a constant | 3927 // Push this as a constant |
4244 push(makecon(TypeInstPtr::make(caller_mirror))); | 3928 set_result(makecon(TypeInstPtr::make(caller_mirror))); |
3929 | |
4245 #ifndef PRODUCT | 3930 #ifndef PRODUCT |
4246 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { | 3931 if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) { |
4247 tty->print_cr(" Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth); | 3932 tty->print_cr(" Succeeded: caller = %s.%s, caller depth = %d, depth = %d", caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), caller_depth_type->get_con(), _depth); |
4248 tty->print_cr(" JVM state at this point:"); | 3933 tty->print_cr(" JVM state at this point:"); |
4249 for (int i = _depth; i >= 1; i--) { | 3934 for (int i = _depth; i >= 1; i--) { |
4250 tty->print_cr(" %d) %s", i, jvms()->of_depth(i)->method()->name()->as_utf8()); | 3935 ciMethod* m = jvms()->of_depth(i)->method(); |
3936 tty->print_cr(" %d) %s.%s", i, m->holder()->name()->as_utf8(), m->name()->as_utf8()); | |
4251 } | 3937 } |
4252 } | 3938 } |
4253 #endif | 3939 #endif |
4254 return true; | 3940 return true; |
4255 } | 3941 } |
4281 | 3967 |
4282 return false; | 3968 return false; |
4283 } | 3969 } |
4284 | 3970 |
4285 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) { | 3971 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) { |
4286 // restore the arguments | 3972 Node* arg = argument(0); |
4287 _sp += arg_size(); | 3973 Node* result; |
4288 | 3974 |
4289 switch (id) { | 3975 switch (id) { |
4290 case vmIntrinsics::_floatToRawIntBits: | 3976 case vmIntrinsics::_floatToRawIntBits: result = new (C) MoveF2INode(arg); break; |
4291 push(_gvn.transform( new (C) MoveF2INode(pop()))); | 3977 case vmIntrinsics::_intBitsToFloat: result = new (C) MoveI2FNode(arg); break; |
4292 break; | 3978 case vmIntrinsics::_doubleToRawLongBits: result = new (C) MoveD2LNode(arg); break; |
4293 | 3979 case vmIntrinsics::_longBitsToDouble: result = new (C) MoveL2DNode(arg); break; |
4294 case vmIntrinsics::_intBitsToFloat: | |
4295 push(_gvn.transform( new (C) MoveI2FNode(pop()))); | |
4296 break; | |
4297 | |
4298 case vmIntrinsics::_doubleToRawLongBits: | |
4299 push_pair(_gvn.transform( new (C) MoveD2LNode(pop_pair()))); | |
4300 break; | |
4301 | |
4302 case vmIntrinsics::_longBitsToDouble: | |
4303 push_pair(_gvn.transform( new (C) MoveL2DNode(pop_pair()))); | |
4304 break; | |
4305 | 3980 |
4306 case vmIntrinsics::_doubleToLongBits: { | 3981 case vmIntrinsics::_doubleToLongBits: { |
4307 Node* value = pop_pair(); | |
4308 | |
4309 // two paths (plus control) merge in a wood | 3982 // two paths (plus control) merge in a wood |
4310 RegionNode *r = new (C) RegionNode(3); | 3983 RegionNode *r = new (C) RegionNode(3); |
4311 Node *phi = new (C) PhiNode(r, TypeLong::LONG); | 3984 Node *phi = new (C) PhiNode(r, TypeLong::LONG); |
4312 | 3985 |
4313 Node *cmpisnan = _gvn.transform( new (C) CmpDNode(value, value)); | 3986 Node *cmpisnan = _gvn.transform(new (C) CmpDNode(arg, arg)); |
4314 // Build the boolean node | 3987 // Build the boolean node |
4315 Node *bolisnan = _gvn.transform( new (C) BoolNode( cmpisnan, BoolTest::ne ) ); | 3988 Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne)); |
4316 | 3989 |
4317 // Branch either way. | 3990 // Branch either way. |
4318 // NaN case is less traveled, which makes all the difference. | 3991 // NaN case is less traveled, which makes all the difference. |
4319 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); | 3992 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); |
4320 Node *opt_isnan = _gvn.transform(ifisnan); | 3993 Node *opt_isnan = _gvn.transform(ifisnan); |
4328 Node *slow_result = longcon(nan_bits); // return NaN | 4001 Node *slow_result = longcon(nan_bits); // return NaN |
4329 phi->init_req(1, _gvn.transform( slow_result )); | 4002 phi->init_req(1, _gvn.transform( slow_result )); |
4330 r->init_req(1, iftrue); | 4003 r->init_req(1, iftrue); |
4331 | 4004 |
4332 // Else fall through | 4005 // Else fall through |
4333 Node *iffalse = _gvn.transform( new (C) IfFalseNode(opt_ifisnan) ); | 4006 Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan)); |
4334 set_control(iffalse); | 4007 set_control(iffalse); |
4335 | 4008 |
4336 phi->init_req(2, _gvn.transform( new (C) MoveD2LNode(value))); | 4009 phi->init_req(2, _gvn.transform(new (C) MoveD2LNode(arg))); |
4337 r->init_req(2, iffalse); | 4010 r->init_req(2, iffalse); |
4338 | 4011 |
4339 // Post merge | 4012 // Post merge |
4340 set_control(_gvn.transform(r)); | 4013 set_control(_gvn.transform(r)); |
4341 record_for_igvn(r); | 4014 record_for_igvn(r); |
4342 | 4015 |
4343 Node* result = _gvn.transform(phi); | 4016 C->set_has_split_ifs(true); // Has chance for split-if optimization |
4017 result = phi; | |
4344 assert(result->bottom_type()->isa_long(), "must be"); | 4018 assert(result->bottom_type()->isa_long(), "must be"); |
4345 push_pair(result); | |
4346 | |
4347 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
4348 | |
4349 break; | 4019 break; |
4350 } | 4020 } |
4351 | 4021 |
4352 case vmIntrinsics::_floatToIntBits: { | 4022 case vmIntrinsics::_floatToIntBits: { |
4353 Node* value = pop(); | |
4354 | |
4355 // two paths (plus control) merge in a wood | 4023 // two paths (plus control) merge in a wood |
4356 RegionNode *r = new (C) RegionNode(3); | 4024 RegionNode *r = new (C) RegionNode(3); |
4357 Node *phi = new (C) PhiNode(r, TypeInt::INT); | 4025 Node *phi = new (C) PhiNode(r, TypeInt::INT); |
4358 | 4026 |
4359 Node *cmpisnan = _gvn.transform( new (C) CmpFNode(value, value)); | 4027 Node *cmpisnan = _gvn.transform(new (C) CmpFNode(arg, arg)); |
4360 // Build the boolean node | 4028 // Build the boolean node |
4361 Node *bolisnan = _gvn.transform( new (C) BoolNode( cmpisnan, BoolTest::ne ) ); | 4029 Node *bolisnan = _gvn.transform(new (C) BoolNode(cmpisnan, BoolTest::ne)); |
4362 | 4030 |
4363 // Branch either way. | 4031 // Branch either way. |
4364 // NaN case is less traveled, which makes all the difference. | 4032 // NaN case is less traveled, which makes all the difference. |
4365 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); | 4033 IfNode *ifisnan = create_and_xform_if(control(), bolisnan, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); |
4366 Node *opt_isnan = _gvn.transform(ifisnan); | 4034 Node *opt_isnan = _gvn.transform(ifisnan); |
4374 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN | 4042 Node *slow_result = makecon(TypeInt::make(nan_bits)); // return NaN |
4375 phi->init_req(1, _gvn.transform( slow_result )); | 4043 phi->init_req(1, _gvn.transform( slow_result )); |
4376 r->init_req(1, iftrue); | 4044 r->init_req(1, iftrue); |
4377 | 4045 |
4378 // Else fall through | 4046 // Else fall through |
4379 Node *iffalse = _gvn.transform( new (C) IfFalseNode(opt_ifisnan) ); | 4047 Node *iffalse = _gvn.transform(new (C) IfFalseNode(opt_ifisnan)); |
4380 set_control(iffalse); | 4048 set_control(iffalse); |
4381 | 4049 |
4382 phi->init_req(2, _gvn.transform( new (C) MoveF2INode(value))); | 4050 phi->init_req(2, _gvn.transform(new (C) MoveF2INode(arg))); |
4383 r->init_req(2, iffalse); | 4051 r->init_req(2, iffalse); |
4384 | 4052 |
4385 // Post merge | 4053 // Post merge |
4386 set_control(_gvn.transform(r)); | 4054 set_control(_gvn.transform(r)); |
4387 record_for_igvn(r); | 4055 record_for_igvn(r); |
4388 | 4056 |
4389 Node* result = _gvn.transform(phi); | 4057 C->set_has_split_ifs(true); // Has chance for split-if optimization |
4058 result = phi; | |
4390 assert(result->bottom_type()->isa_int(), "must be"); | 4059 assert(result->bottom_type()->isa_int(), "must be"); |
4391 push(result); | |
4392 | |
4393 C->set_has_split_ifs(true); // Has chance for split-if optimization | |
4394 | |
4395 break; | 4060 break; |
4396 } | 4061 } |
4397 | 4062 |
4398 default: | 4063 default: |
4399 ShouldNotReachHere(); | 4064 fatal_unexpected_iid(id); |
4400 } | 4065 break; |
4401 | 4066 } |
4067 set_result(_gvn.transform(result)); | |
4402 return true; | 4068 return true; |
4403 } | 4069 } |
4404 | 4070 |
4405 #ifdef _LP64 | 4071 #ifdef _LP64 |
4406 #define XTOP ,top() /*additional argument*/ | 4072 #define XTOP ,top() /*additional argument*/ |
4407 #else //_LP64 | 4073 #else //_LP64 |
4408 #define XTOP /*no additional argument*/ | 4074 #define XTOP /*no additional argument*/ |
4409 #endif //_LP64 | 4075 #endif //_LP64 |
4410 | 4076 |
4411 //----------------------inline_unsafe_copyMemory------------------------- | 4077 //----------------------inline_unsafe_copyMemory------------------------- |
4078 // public native void sun.misc.Unsafe.copyMemory(Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes); | |
4412 bool LibraryCallKit::inline_unsafe_copyMemory() { | 4079 bool LibraryCallKit::inline_unsafe_copyMemory() { |
4413 if (callee()->is_static()) return false; // caller must have the capability! | 4080 if (callee()->is_static()) return false; // caller must have the capability! |
4414 int nargs = 1 + 5 + 3; // 5 args: (src: ptr,off, dst: ptr,off, size) | 4081 null_check_receiver(); // null-check receiver |
4415 assert(signature()->size() == nargs-1, "copy has 5 arguments"); | |
4416 null_check_receiver(callee()); // check then ignore argument(0) | |
4417 if (stopped()) return true; | 4082 if (stopped()) return true; |
4418 | 4083 |
4419 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". | 4084 C->set_has_unsafe_access(true); // Mark eventual nmethod as "unsafe". |
4420 | 4085 |
4421 Node* src_ptr = argument(1); | 4086 Node* src_ptr = argument(1); // type: oop |
4422 Node* src_off = ConvL2X(argument(2)); | 4087 Node* src_off = ConvL2X(argument(2)); // type: long |
4423 assert(argument(3)->is_top(), "2nd half of long"); | 4088 Node* dst_ptr = argument(4); // type: oop |
4424 Node* dst_ptr = argument(4); | 4089 Node* dst_off = ConvL2X(argument(5)); // type: long |
4425 Node* dst_off = ConvL2X(argument(5)); | 4090 Node* size = ConvL2X(argument(7)); // type: long |
4426 assert(argument(6)->is_top(), "2nd half of long"); | |
4427 Node* size = ConvL2X(argument(7)); | |
4428 assert(argument(8)->is_top(), "2nd half of long"); | |
4429 | 4091 |
4430 assert(Unsafe_field_offset_to_byte_offset(11) == 11, | 4092 assert(Unsafe_field_offset_to_byte_offset(11) == 11, |
4431 "fieldOffset must be byte-scaled"); | 4093 "fieldOffset must be byte-scaled"); |
4432 | 4094 |
4433 Node* src = make_unsafe_address(src_ptr, src_off); | 4095 Node* src = make_unsafe_address(src_ptr, src_off); |
4543 insert_mem_bar(Op_MemBarCPUOrder); | 4205 insert_mem_bar(Op_MemBarCPUOrder); |
4544 } | 4206 } |
4545 } | 4207 } |
4546 | 4208 |
4547 //------------------------inline_native_clone---------------------------- | 4209 //------------------------inline_native_clone---------------------------- |
4210 // protected native Object java.lang.Object.clone(); | |
4211 // | |
4548 // Here are the simple edge cases: | 4212 // Here are the simple edge cases: |
4549 // null receiver => normal trap | 4213 // null receiver => normal trap |
4550 // virtual and clone was overridden => slow path to out-of-line clone | 4214 // virtual and clone was overridden => slow path to out-of-line clone |
4551 // not cloneable or finalizer => slow path to out-of-line Object.clone | 4215 // not cloneable or finalizer => slow path to out-of-line Object.clone |
4552 // | 4216 // |
4559 // | 4223 // |
4560 // These steps fold up nicely if and when the cloned object's klass | 4224 // These steps fold up nicely if and when the cloned object's klass |
4561 // can be sharply typed as an object array, a type array, or an instance. | 4225 // can be sharply typed as an object array, a type array, or an instance. |
4562 // | 4226 // |
4563 bool LibraryCallKit::inline_native_clone(bool is_virtual) { | 4227 bool LibraryCallKit::inline_native_clone(bool is_virtual) { |
4564 int nargs = 1; | |
4565 PhiNode* result_val; | 4228 PhiNode* result_val; |
4566 | 4229 |
4567 //set the original stack and the reexecute bit for the interpreter to reexecute | 4230 // Set the reexecute bit for the interpreter to reexecute |
4568 //the bytecode that invokes Object.clone if deoptimization happens | 4231 // the bytecode that invokes Object.clone if deoptimization happens. |
4569 { PreserveReexecuteState preexecs(this); | 4232 { PreserveReexecuteState preexecs(this); |
4570 jvms()->set_should_reexecute(true); | 4233 jvms()->set_should_reexecute(true); |
4571 | 4234 |
4572 //null_check_receiver will adjust _sp (push and pop) | 4235 Node* obj = null_check_receiver(); |
4573 Node* obj = null_check_receiver(callee()); | |
4574 if (stopped()) return true; | 4236 if (stopped()) return true; |
4575 | |
4576 _sp += nargs; | |
4577 | 4237 |
4578 Node* obj_klass = load_object_klass(obj); | 4238 Node* obj_klass = load_object_klass(obj); |
4579 const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr(); | 4239 const TypeKlassPtr* tklass = _gvn.type(obj_klass)->isa_klassptr(); |
4580 const TypeOopPtr* toop = ((tklass != NULL) | 4240 const TypeOopPtr* toop = ((tklass != NULL) |
4581 ? tklass->as_instance_type() | 4241 ? tklass->as_instance_type() |
4609 // It's an array. | 4269 // It's an array. |
4610 PreserveJVMState pjvms(this); | 4270 PreserveJVMState pjvms(this); |
4611 set_control(array_ctl); | 4271 set_control(array_ctl); |
4612 Node* obj_length = load_array_length(obj); | 4272 Node* obj_length = load_array_length(obj); |
4613 Node* obj_size = NULL; | 4273 Node* obj_size = NULL; |
4614 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); | 4274 Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push |
4615 | 4275 |
4616 if (!use_ReduceInitialCardMarks()) { | 4276 if (!use_ReduceInitialCardMarks()) { |
4617 // If it is an oop array, it requires very special treatment, | 4277 // If it is an oop array, it requires very special treatment, |
4618 // because card marking is required on each card of the array. | 4278 // because card marking is required on each card of the array. |
4619 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); | 4279 Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); |
4709 | 4369 |
4710 // Return the combined state. | 4370 // Return the combined state. |
4711 set_control( _gvn.transform(result_reg) ); | 4371 set_control( _gvn.transform(result_reg) ); |
4712 set_i_o( _gvn.transform(result_i_o) ); | 4372 set_i_o( _gvn.transform(result_i_o) ); |
4713 set_all_memory( _gvn.transform(result_mem) ); | 4373 set_all_memory( _gvn.transform(result_mem) ); |
4714 } //original reexecute and sp are set back here | 4374 } // original reexecute is set back here |
4715 | 4375 |
4716 push(_gvn.transform(result_val)); | 4376 set_result(_gvn.transform(result_val)); |
4717 | |
4718 return true; | 4377 return true; |
4719 } | 4378 } |
4720 | 4379 |
4721 //------------------------------basictype2arraycopy---------------------------- | 4380 //------------------------------basictype2arraycopy---------------------------- |
4722 address LibraryCallKit::basictype2arraycopy(BasicType t, | 4381 address LibraryCallKit::basictype2arraycopy(BasicType t, |
4753 return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized); | 4412 return StubRoutines::select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized); |
4754 } | 4413 } |
4755 | 4414 |
4756 | 4415 |
4757 //------------------------------inline_arraycopy----------------------- | 4416 //------------------------------inline_arraycopy----------------------- |
4417 // public static native void java.lang.System.arraycopy(Object src, int srcPos, | |
4418 // Object dest, int destPos, | |
4419 // int length); | |
4758 bool LibraryCallKit::inline_arraycopy() { | 4420 bool LibraryCallKit::inline_arraycopy() { |
4759 // Restore the stack and pop off the arguments. | 4421 // Get the arguments. |
4760 int nargs = 5; // 2 oops, 3 ints, no size_t or long | 4422 Node* src = argument(0); // type: oop |
4761 assert(callee()->signature()->size() == nargs, "copy has 5 arguments"); | 4423 Node* src_offset = argument(1); // type: int |
4762 | 4424 Node* dest = argument(2); // type: oop |
4763 Node *src = argument(0); | 4425 Node* dest_offset = argument(3); // type: int |
4764 Node *src_offset = argument(1); | 4426 Node* length = argument(4); // type: int |
4765 Node *dest = argument(2); | |
4766 Node *dest_offset = argument(3); | |
4767 Node *length = argument(4); | |
4768 | 4427 |
4769 // Compile time checks. If any of these checks cannot be verified at compile time, | 4428 // Compile time checks. If any of these checks cannot be verified at compile time, |
4770 // we do not make a fast path for this call. Instead, we let the call remain as it | 4429 // we do not make a fast path for this call. Instead, we let the call remain as it |
4771 // is. The checks we choose to mandate at compile time are: | 4430 // is. The checks we choose to mandate at compile time are: |
4772 // | 4431 // |
4773 // (1) src and dest are arrays. | 4432 // (1) src and dest are arrays. |
4774 const Type* src_type = src->Value(&_gvn); | 4433 const Type* src_type = src->Value(&_gvn); |
4775 const Type* dest_type = dest->Value(&_gvn); | 4434 const Type* dest_type = dest->Value(&_gvn); |
4776 const TypeAryPtr* top_src = src_type->isa_aryptr(); | 4435 const TypeAryPtr* top_src = src_type->isa_aryptr(); |
4777 const TypeAryPtr* top_dest = dest_type->isa_aryptr(); | 4436 const TypeAryPtr* top_dest = dest_type->isa_aryptr(); |
4778 if (top_src == NULL || top_src->klass() == NULL || | 4437 if (top_src == NULL || top_src->klass() == NULL || |
4779 top_dest == NULL || top_dest->klass() == NULL) { | 4438 top_dest == NULL || top_dest->klass() == NULL) { |
4780 // Conservatively insert a memory barrier on all memory slices. | 4439 // Conservatively insert a memory barrier on all memory slices. |
4781 // Do not let writes into the source float below the arraycopy. | 4440 // Do not let writes into the source float below the arraycopy. |
4826 | 4485 |
4827 RegionNode* slow_region = new (C) RegionNode(1); | 4486 RegionNode* slow_region = new (C) RegionNode(1); |
4828 record_for_igvn(slow_region); | 4487 record_for_igvn(slow_region); |
4829 | 4488 |
4830 // (3) operands must not be null | 4489 // (3) operands must not be null |
4831 // We currently perform our null checks with the do_null_check routine. | 4490 // We currently perform our null checks with the null_check routine. |
4832 // This means that the null exceptions will be reported in the caller | 4491 // This means that the null exceptions will be reported in the caller |
4833 // rather than (correctly) reported inside of the native arraycopy call. | 4492 // rather than (correctly) reported inside of the native arraycopy call. |
4834 // This should be corrected, given time. We do our null check with the | 4493 // This should be corrected, given time. We do our null check with the |
4835 // stack pointer restored. | 4494 // stack pointer restored. |
4836 _sp += nargs; | 4495 src = null_check(src, T_ARRAY); |
4837 src = do_null_check(src, T_ARRAY); | 4496 dest = null_check(dest, T_ARRAY); |
4838 dest = do_null_check(dest, T_ARRAY); | |
4839 _sp -= nargs; | |
4840 | 4497 |
4841 // (4) src_offset must not be negative. | 4498 // (4) src_offset must not be negative. |
4842 generate_negative_guard(src_offset, slow_region); | 4499 generate_negative_guard(src_offset, slow_region); |
4843 | 4500 |
4844 // (5) dest_offset must not be negative. | 4501 // (5) dest_offset must not be negative. |
5177 | 4834 |
5178 // Here are all the slow paths up to this point, in one bundle: | 4835 // Here are all the slow paths up to this point, in one bundle: |
5179 slow_control = top(); | 4836 slow_control = top(); |
5180 if (slow_region != NULL) | 4837 if (slow_region != NULL) |
5181 slow_control = _gvn.transform(slow_region); | 4838 slow_control = _gvn.transform(slow_region); |
5182 debug_only(slow_region = (RegionNode*)badAddress); | 4839 DEBUG_ONLY(slow_region = (RegionNode*)badAddress); |
5183 | 4840 |
5184 set_control(checked_control); | 4841 set_control(checked_control); |
5185 if (!stopped()) { | 4842 if (!stopped()) { |
5186 // Clean up after the checked call. | 4843 // Clean up after the checked call. |
5187 // The returned value is either 0 or -1^K, | 4844 // The returned value is either 0 or -1^K, |
5672 copyfunc_addr, copyfunc_name, adr_type, | 5329 copyfunc_addr, copyfunc_name, adr_type, |
5673 src_start, dest_start, copy_length XTOP); | 5330 src_start, dest_start, copy_length XTOP); |
5674 } | 5331 } |
5675 | 5332 |
5676 //----------------------------inline_reference_get---------------------------- | 5333 //----------------------------inline_reference_get---------------------------- |
5677 | 5334 // public T java.lang.ref.Reference.get(); |
5678 bool LibraryCallKit::inline_reference_get() { | 5335 bool LibraryCallKit::inline_reference_get() { |
5679 const int nargs = 1; // self | 5336 const int referent_offset = java_lang_ref_Reference::referent_offset; |
5680 | 5337 guarantee(referent_offset > 0, "should have already been set"); |
5681 guarantee(java_lang_ref_Reference::referent_offset > 0, | 5338 |
5682 "should have already been set"); | 5339 // Get the argument: |
5683 | 5340 Node* reference_obj = null_check_receiver(); |
5684 int referent_offset = java_lang_ref_Reference::referent_offset; | |
5685 | |
5686 // Restore the stack and pop off the argument | |
5687 _sp += nargs; | |
5688 Node *reference_obj = pop(); | |
5689 | |
5690 // Null check on self without removing any arguments. | |
5691 _sp += nargs; | |
5692 reference_obj = do_null_check(reference_obj, T_OBJECT); | |
5693 _sp -= nargs;; | |
5694 | |
5695 if (stopped()) return true; | 5341 if (stopped()) return true; |
5696 | 5342 |
5697 Node *adr = basic_plus_adr(reference_obj, reference_obj, referent_offset); | 5343 Node* adr = basic_plus_adr(reference_obj, reference_obj, referent_offset); |
5698 | 5344 |
5699 ciInstanceKlass* klass = env()->Object_klass(); | 5345 ciInstanceKlass* klass = env()->Object_klass(); |
5700 const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass); | 5346 const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass); |
5701 | 5347 |
5702 Node* no_ctrl = NULL; | 5348 Node* no_ctrl = NULL; |
5703 Node *result = make_load(no_ctrl, adr, object_type, T_OBJECT); | 5349 Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT); |
5704 | 5350 |
5705 // Use the pre-barrier to record the value in the referent field | 5351 // Use the pre-barrier to record the value in the referent field |
5706 pre_barrier(false /* do_load */, | 5352 pre_barrier(false /* do_load */, |
5707 control(), | 5353 control(), |
5708 NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */, | 5354 NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */, |
5711 | 5357 |
5712 // Add memory barrier to prevent commoning reads from this field | 5358 // Add memory barrier to prevent commoning reads from this field |
5713 // across safepoint since GC can change its value. | 5359 // across safepoint since GC can change its value. |
5714 insert_mem_bar(Op_MemBarCPUOrder); | 5360 insert_mem_bar(Op_MemBarCPUOrder); |
5715 | 5361 |
5716 push(result); | 5362 set_result(result); |
5717 return true; | 5363 return true; |
5718 } | 5364 } |
5719 | 5365 |
5720 | 5366 |
5721 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, | 5367 Node * LibraryCallKit::load_field_from_object(Node * fromObj, const char * fieldName, const char * fieldTypeString, |
5768 stubName = "aescrypt_decryptBlock"; | 5414 stubName = "aescrypt_decryptBlock"; |
5769 break; | 5415 break; |
5770 } | 5416 } |
5771 if (stubAddr == NULL) return false; | 5417 if (stubAddr == NULL) return false; |
5772 | 5418 |
5773 // Restore the stack and pop off the arguments. | 5419 Node* aescrypt_object = argument(0); |
5774 int nargs = 5; // this + 2 oop/offset combos | 5420 Node* src = argument(1); |
5775 assert(callee()->signature()->size() == nargs-1, "encryptBlock has 4 arguments"); | 5421 Node* src_offset = argument(2); |
5776 | 5422 Node* dest = argument(3); |
5777 Node *aescrypt_object = argument(0); | 5423 Node* dest_offset = argument(4); |
5778 Node *src = argument(1); | |
5779 Node *src_offset = argument(2); | |
5780 Node *dest = argument(3); | |
5781 Node *dest_offset = argument(4); | |
5782 | 5424 |
5783 // (1) src and dest are arrays. | 5425 // (1) src and dest are arrays. |
5784 const Type* src_type = src->Value(&_gvn); | 5426 const Type* src_type = src->Value(&_gvn); |
5785 const Type* dest_type = dest->Value(&_gvn); | 5427 const Type* dest_type = dest->Value(&_gvn); |
5786 const TypeAryPtr* top_src = src_type->isa_aryptr(); | 5428 const TypeAryPtr* top_src = src_type->isa_aryptr(); |
5827 stubName = "cipherBlockChaining_decryptAESCrypt"; | 5469 stubName = "cipherBlockChaining_decryptAESCrypt"; |
5828 break; | 5470 break; |
5829 } | 5471 } |
5830 if (stubAddr == NULL) return false; | 5472 if (stubAddr == NULL) return false; |
5831 | 5473 |
5832 | 5474 Node* cipherBlockChaining_object = argument(0); |
5833 // Restore the stack and pop off the arguments. | 5475 Node* src = argument(1); |
5834 int nargs = 6; // this + oop/offset + len + oop/offset | 5476 Node* src_offset = argument(2); |
5835 assert(callee()->signature()->size() == nargs-1, "wrong number of arguments"); | 5477 Node* len = argument(3); |
5836 Node *cipherBlockChaining_object = argument(0); | 5478 Node* dest = argument(4); |
5837 Node *src = argument(1); | 5479 Node* dest_offset = argument(5); |
5838 Node *src_offset = argument(2); | |
5839 Node *len = argument(3); | |
5840 Node *dest = argument(4); | |
5841 Node *dest_offset = argument(5); | |
5842 | 5480 |
5843 // (1) src and dest are arrays. | 5481 // (1) src and dest are arrays. |
5844 const Type* src_type = src->Value(&_gvn); | 5482 const Type* src_type = src->Value(&_gvn); |
5845 const Type* dest_type = dest->Value(&_gvn); | 5483 const Type* dest_type = dest->Value(&_gvn); |
5846 const TypeAryPtr* top_src = src_type->isa_aryptr(); | 5484 const TypeAryPtr* top_src = src_type->isa_aryptr(); |
5918 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath | 5556 // if ((embeddedCipherObj instanceof AESCrypt) && (cipher!=plain)) do_intrinsic, else do_javapath |
5919 // note cipher==plain is more conservative than the original java code but that's OK | 5557 // note cipher==plain is more conservative than the original java code but that's OK |
5920 // | 5558 // |
5921 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) { | 5559 Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting) { |
5922 // First, check receiver for NULL since it is virtual method. | 5560 // First, check receiver for NULL since it is virtual method. |
5923 int nargs = arg_size(); | |
5924 Node* objCBC = argument(0); | 5561 Node* objCBC = argument(0); |
5925 _sp += nargs; | 5562 objCBC = null_check(objCBC); |
5926 objCBC = do_null_check(objCBC, T_OBJECT); | |
5927 _sp -= nargs; | |
5928 | 5563 |
5929 if (stopped()) return NULL; // Always NULL | 5564 if (stopped()) return NULL; // Always NULL |
5930 | 5565 |
5931 // Load embeddedCipher field of CipherBlockChaining object. | 5566 // Load embeddedCipher field of CipherBlockChaining object. |
5932 Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false); | 5567 Node* embeddedCipherObj = load_field_from_object(objCBC, "embeddedCipher", "Lcom/sun/crypto/provider/SymmetricCipher;", /*is_exact*/ false); |
5946 set_control(top()); // no regular fast path | 5581 set_control(top()); // no regular fast path |
5947 return ctrl; | 5582 return ctrl; |
5948 } | 5583 } |
5949 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass(); | 5584 ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass(); |
5950 | 5585 |
5951 _sp += nargs; // gen_instanceof might do an uncommon trap | |
5952 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt))); | 5586 Node* instof = gen_instanceof(embeddedCipherObj, makecon(TypeKlassPtr::make(instklass_AESCrypt))); |
5953 _sp -= nargs; | |
5954 Node* cmp_instof = _gvn.transform(new (C) CmpINode(instof, intcon(1))); | 5587 Node* cmp_instof = _gvn.transform(new (C) CmpINode(instof, intcon(1))); |
5955 Node* bool_instof = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne)); | 5588 Node* bool_instof = _gvn.transform(new (C) BoolNode(cmp_instof, BoolTest::ne)); |
5956 | 5589 |
5957 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN); | 5590 Node* instof_false = generate_guard(bool_instof, NULL, PROB_MIN); |
5958 | 5591 |
5964 // taking the intrinsic path when cipher and plain are the same | 5597 // taking the intrinsic path when cipher and plain are the same |
5965 // see the original java code for why. | 5598 // see the original java code for why. |
5966 RegionNode* region = new(C) RegionNode(3); | 5599 RegionNode* region = new(C) RegionNode(3); |
5967 region->init_req(1, instof_false); | 5600 region->init_req(1, instof_false); |
5968 Node* src = argument(1); | 5601 Node* src = argument(1); |
5969 Node *dest = argument(4); | 5602 Node* dest = argument(4); |
5970 Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest)); | 5603 Node* cmp_src_dest = _gvn.transform(new (C) CmpPNode(src, dest)); |
5971 Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq)); | 5604 Node* bool_src_dest = _gvn.transform(new (C) BoolNode(cmp_src_dest, BoolTest::eq)); |
5972 Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN); | 5605 Node* src_dest_conjoint = generate_guard(bool_src_dest, NULL, PROB_MIN); |
5973 region->init_req(2, src_dest_conjoint); | 5606 region->init_req(2, src_dest_conjoint); |
5974 | 5607 |
5975 record_for_igvn(region); | 5608 record_for_igvn(region); |
5976 return _gvn.transform(region); | 5609 return _gvn.transform(region); |
5977 | 5610 } |
5978 } | |
5979 | |
5980 |