Mercurial > hg > truffle
comparison src/share/vm/opto/library_call.cpp @ 17968:dbf0d88d867d
8011646: SEGV in compiled code with loop predication
Summary: Remove control edge of load node to ensure that castPP removal sets the control edge correctly
Reviewed-by: kvn, roland
| Field | Value |
|---|---|
| author | anoll |
| date | Fri, 30 May 2014 06:50:38 +0200 |
parents | a062c3691003 |
children | 00c8a1255912 |
Comparison legend: equal, deleted, inserted, replaced
| 17964:660b3f6bf7d7 (parent) | 17968:dbf0d88d867d (this changeset) |
|---|---|
3976 set_edges_for_java_call(slow_call); | 3976 set_edges_for_java_call(slow_call); |
3977 return slow_call; | 3977 return slow_call; |
3978 } | 3978 } |
3979 | 3979 |
3980 | 3980 |
3981 //------------------------------inline_native_hashcode-------------------- | 3981 /** |
3982 // Build special case code for calls to hashCode on an object. | 3982 * Build special case code for calls to hashCode on an object. This call may |
3983 * be virtual (invokevirtual) or bound (invokespecial). For each case we generate | |
3984 * slightly different code. | |
3985 */ | |
3983 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) { | 3986 bool LibraryCallKit::inline_native_hashcode(bool is_virtual, bool is_static) { |
3984 assert(is_static == callee()->is_static(), "correct intrinsic selection"); | 3987 assert(is_static == callee()->is_static(), "correct intrinsic selection"); |
3985 assert(!(is_virtual && is_static), "either virtual, special, or static"); | 3988 assert(!(is_virtual && is_static), "either virtual, special, or static"); |
3986 | 3989 |
3987 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT }; | 3990 enum { _slow_path = 1, _fast_path, _null_path, PATH_LIMIT }; |
3988 | 3991 |
3989 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT); | 3992 RegionNode* result_reg = new(C) RegionNode(PATH_LIMIT); |
3990 PhiNode* result_val = new(C) PhiNode(result_reg, | 3993 PhiNode* result_val = new(C) PhiNode(result_reg, TypeInt::INT); |
3991 TypeInt::INT); | |
3992 PhiNode* result_io = new(C) PhiNode(result_reg, Type::ABIO); | 3994 PhiNode* result_io = new(C) PhiNode(result_reg, Type::ABIO); |
3993 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY, | 3995 PhiNode* result_mem = new(C) PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM); |
3994 TypePtr::BOTTOM); | |
3995 Node* obj = NULL; | 3996 Node* obj = NULL; |
3996 if (!is_static) { | 3997 if (!is_static) { |
3997 // Check for hashing null object | 3998 // Check for hashing null object |
3998 obj = null_check_receiver(); | 3999 obj = null_check_receiver(); |
3999 if (stopped()) return true; // unconditionally null | 4000 if (stopped()) return true; // unconditionally null |
4015 if (!stopped()) | 4016 if (!stopped()) |
4016 set_result(result_val->in(_null_path)); | 4017 set_result(result_val->in(_null_path)); |
4017 return true; | 4018 return true; |
4018 } | 4019 } |
4019 | 4020 |
4020 // After null check, get the object's klass. | |
4021 Node* obj_klass = load_object_klass(obj); | |
4022 | |
4023 // This call may be virtual (invokevirtual) or bound (invokespecial). | |
4024 // For each case we generate slightly different code. | |
4025 | |
4026 // We only go to the fast case code if we pass a number of guards. The | 4021 // We only go to the fast case code if we pass a number of guards. The |
4027 // paths which do not pass are accumulated in the slow_region. | 4022 // paths which do not pass are accumulated in the slow_region. |
4028 RegionNode* slow_region = new (C) RegionNode(1); | 4023 RegionNode* slow_region = new (C) RegionNode(1); |
4029 record_for_igvn(slow_region); | 4024 record_for_igvn(slow_region); |
4030 | 4025 |
4033 // If the target method which we are calling happens to be the native | 4028 // If the target method which we are calling happens to be the native |
4034 // Object hashCode() method, we pass the guard. We do not need this | 4029 // Object hashCode() method, we pass the guard. We do not need this |
4035 // guard for non-virtual calls -- the caller is known to be the native | 4030 // guard for non-virtual calls -- the caller is known to be the native |
4036 // Object hashCode(). | 4031 // Object hashCode(). |
4037 if (is_virtual) { | 4032 if (is_virtual) { |
4033 // After null check, get the object's klass. | |
4034 Node* obj_klass = load_object_klass(obj); | |
4038 generate_virtual_guard(obj_klass, slow_region); | 4035 generate_virtual_guard(obj_klass, slow_region); |
4039 } | 4036 } |
4040 | 4037 |
4041 // Get the header out of the object, use LoadMarkNode when available | 4038 // Get the header out of the object, use LoadMarkNode when available |
4042 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); | 4039 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); |
4043 Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); | 4040 // The control of the load must be NULL. Otherwise, the load can move before |
4041 // the null check after castPP removal. | |
4042 Node* no_ctrl = NULL; | |
4043 Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); | |
4044 | 4044 |
4045 // Test the header to see if it is unlocked. | 4045 // Test the header to see if it is unlocked. |
4046 Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place); | 4046 Node* lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place); |
4047 Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask)); | 4047 Node* lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask)); |
4048 Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value); | 4048 Node* unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value); |
4049 Node *chk_unlocked = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val)); | 4049 Node* chk_unlocked = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val)); |
4050 Node *test_unlocked = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne)); | 4050 Node* test_unlocked = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne)); |
4051 | 4051 |
4052 generate_slow_guard(test_unlocked, slow_region); | 4052 generate_slow_guard(test_unlocked, slow_region); |
4053 | 4053 |
4054 // Get the hash value and check to see that it has been properly assigned. | 4054 // Get the hash value and check to see that it has been properly assigned. |
4055 // We depend on hash_mask being at most 32 bits and avoid the use of | 4055 // We depend on hash_mask being at most 32 bits and avoid the use of |
4056 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit | 4056 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit |
4057 // vm: see markOop.hpp. | 4057 // vm: see markOop.hpp. |
4058 Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask); | 4058 Node* hash_mask = _gvn.intcon(markOopDesc::hash_mask); |
4059 Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift); | 4059 Node* hash_shift = _gvn.intcon(markOopDesc::hash_shift); |
4060 Node *hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift)); | 4060 Node* hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift)); |
4061 // This hack lets the hash bits live anywhere in the mark object now, as long | 4061 // This hack lets the hash bits live anywhere in the mark object now, as long |
4062 // as the shift drops the relevant bits into the low 32 bits. Note that | 4062 // as the shift drops the relevant bits into the low 32 bits. Note that |
4063 // Java spec says that HashCode is an int so there's no point in capturing | 4063 // Java spec says that HashCode is an int so there's no point in capturing |
4064 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build). | 4064 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build). |
4065 hshifted_header = ConvX2I(hshifted_header); | 4065 hshifted_header = ConvX2I(hshifted_header); |
4066 Node *hash_val = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask)); | 4066 Node* hash_val = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask)); |
4067 | 4067 |
4068 Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash); | 4068 Node* no_hash_val = _gvn.intcon(markOopDesc::no_hash); |
4069 Node *chk_assigned = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val)); | 4069 Node* chk_assigned = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val)); |
4070 Node *test_assigned = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq)); | 4070 Node* test_assigned = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq)); |
4071 | 4071 |
4072 generate_slow_guard(test_assigned, slow_region); | 4072 generate_slow_guard(test_assigned, slow_region); |
4073 | 4073 |
4074 Node* init_mem = reset_memory(); | 4074 Node* init_mem = reset_memory(); |
4075 // fill in the rest of the null path: | 4075 // fill in the rest of the null path: |