comparison src/share/vm/opto/library_call.cpp @ 6143:8b0a4867acf0

7174218: remove AtomicLongCSImpl intrinsics Reviewed-by: kvn, twisti Contributed-by: Krystal Mok <sajia@taobao.com>
author twisti
date Tue, 12 Jun 2012 14:31:44 -0700
parents 6759698e3140
children eeb819cf36e5
comparison
equal deleted inserted replaced
6142:121e5708ae96 6143:8b0a4867acf0
190 bool inline_array_copyOf(bool is_copyOfRange); 190 bool inline_array_copyOf(bool is_copyOfRange);
191 bool inline_array_equals(); 191 bool inline_array_equals();
192 void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark); 192 void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
193 bool inline_native_clone(bool is_virtual); 193 bool inline_native_clone(bool is_virtual);
194 bool inline_native_Reflection_getCallerClass(); 194 bool inline_native_Reflection_getCallerClass();
195 bool inline_native_AtomicLong_get();
196 bool inline_native_AtomicLong_attemptUpdate();
197 bool is_method_invoke_or_aux_frame(JVMState* jvms); 195 bool is_method_invoke_or_aux_frame(JVMState* jvms);
198 // Helper function for inlining native object hash method 196 // Helper function for inlining native object hash method
199 bool inline_native_hashcode(bool is_virtual, bool is_static); 197 bool inline_native_hashcode(bool is_virtual, bool is_static);
200 bool inline_native_getClass(); 198 bool inline_native_getClass();
201 199
329 break; 327 break;
330 case vmIntrinsics::_checkIndex: 328 case vmIntrinsics::_checkIndex:
331 // We do not intrinsify this. The optimizer does fine with it. 329 // We do not intrinsify this. The optimizer does fine with it.
332 return NULL; 330 return NULL;
333 331
334 case vmIntrinsics::_get_AtomicLong:
335 case vmIntrinsics::_attemptUpdate:
336 if (!InlineAtomicLong) return NULL;
337 break;
338
339 case vmIntrinsics::_getCallerClass: 332 case vmIntrinsics::_getCallerClass:
340 if (!UseNewReflection) return NULL; 333 if (!UseNewReflection) return NULL;
341 if (!InlineReflectionGetCallerClass) return NULL; 334 if (!InlineReflectionGetCallerClass) return NULL;
342 if (!JDK_Version::is_gte_jdk14x_version()) return NULL; 335 if (!JDK_Version::is_gte_jdk14x_version()) return NULL;
343 break; 336 break;
708 case vmIntrinsics::_reverseBytes_i: 701 case vmIntrinsics::_reverseBytes_i:
709 case vmIntrinsics::_reverseBytes_l: 702 case vmIntrinsics::_reverseBytes_l:
710 case vmIntrinsics::_reverseBytes_s: 703 case vmIntrinsics::_reverseBytes_s:
711 case vmIntrinsics::_reverseBytes_c: 704 case vmIntrinsics::_reverseBytes_c:
712 return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id()); 705 return inline_reverseBytes((vmIntrinsics::ID) intrinsic_id());
713
714 case vmIntrinsics::_get_AtomicLong:
715 return inline_native_AtomicLong_get();
716 case vmIntrinsics::_attemptUpdate:
717 return inline_native_AtomicLong_attemptUpdate();
718 706
719 case vmIntrinsics::_getCallerClass: 707 case vmIntrinsics::_getCallerClass:
720 return inline_native_Reflection_getCallerClass(); 708 return inline_native_Reflection_getCallerClass();
721 709
722 case vmIntrinsics::_Reference_get: 710 case vmIntrinsics::_Reference_get:
4004 } 3992 }
4005 3993
4006 return false; 3994 return false;
4007 } 3995 }
4008 3996
// NOTE(review): lazily-initialized cache of the field offset, shared by the two
// AtomicLong intrinsics below. The unsynchronized write is a benign race: every
// writer computes and stores the same constant value.
4009 static int value_field_offset = -1; // offset of the "value" field of AtomicLongCSImpl. This is needed by
4010 // inline_native_AtomicLong_attemptUpdate() but it has no way of
4011 // computing it since there is no lookup field by name function in the
4012 // CI interface. This is computed and set by inline_native_AtomicLong_get().
4013 // Using a static variable here is safe even if we have multiple compilation
4014 // threads because the offset is constant. At worst the same offset will be
4015 // computed and stored multiple times.
4016 
// Intrinsify sun.misc.AtomicLongCSImpl.get(): emit a single 64-bit load of the
// receiver's "value" field. The offset of that field is discovered by scanning
// the callee's own bytecodes (the CI interface has no lookup-field-by-name) and
// cached in the file-static value_field_offset for attemptUpdate() to reuse.
// Returns true if the intrinsic was emitted; false bails out of intrinsification
// (the caller presumably falls back to the ordinary call — TODO confirm).
4017 bool LibraryCallKit::inline_native_AtomicLong_get() {
4018 // Restore the stack and pop off the argument
4019 _sp+=1;
4020 Node *obj = pop();
4021 
4022 // get the offset of the "value" field. Since the CI interfaces
4023 // does not provide a way to look up a field by name, we scan the bytecodes
4024 // to get the field index. We expect the first 2 instructions of the method
4025 // to be:
4026 // 0 aload_0
4027 // 1 getfield "value"
4028 ciMethod* method = callee();
// Only scan on the first compilation to reach here; afterwards the cached
// offset is used. Any deviation from the expected aload_0/getfield shape
// makes us refuse to intrinsify rather than guess.
4029 if (value_field_offset == -1)
4030 {
4031 ciField* value_field;
4032 ciBytecodeStream iter(method);
4033 Bytecodes::Code bc = iter.next();
4034 
4035 if ((bc != Bytecodes::_aload_0) &&
4036 ((bc != Bytecodes::_aload) || (iter.get_index() != 0)))
4037 return false;
4038 bc = iter.next();
4039 if (bc != Bytecodes::_getfield)
4040 return false;
4041 bool ignore;
4042 value_field = iter.get_field(ignore);
4043 value_field_offset = value_field->offset_in_bytes();
4044 }
4045 
4046 // Null check without removing any arguments.
4047 _sp++;
4048 obj = do_null_check(obj, T_OBJECT);
4049 _sp--;
4050 // Check for locking null object
// do_null_check halted this path (receiver provably null); the intrinsic is
// still "done" — control never reaches the load.
4051 if (stopped()) return true;
4052 
// Address of obj.value and its alias category, so the load is wired into the
// correct memory slice.
4053 Node *adr = basic_plus_adr(obj, obj, value_field_offset);
4054 const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
4055 int alias_idx = C->get_alias_index(adr_type);
4056 
// NOTE(review): LoadLLocked looks like the load-linked half of an LL/SC pair
// with the StoreLConditional emitted by attemptUpdate() — confirm against the
// platform back ends before relying on that reading.
4057 Node *result = _gvn.transform(new (C, 3) LoadLLockedNode(control(), memory(alias_idx), adr));
4058 
// The result is a long, so it occupies two JVM stack slots.
4059 push_pair(result);
4060 
4061 return true;
4062 }
4063
// Intrinsify sun.misc.AtomicLongCSImpl.attemptUpdate(long oldVal, long newVal):
// emit a 64-bit conditional store of newVal into the receiver's "value" field
// and push a boolean (1 = store happened, 0 = it did not). Depends on
// value_field_offset having been computed by inline_native_AtomicLong_get();
// bails out (returns false) if it has not.
4064 bool LibraryCallKit::inline_native_AtomicLong_attemptUpdate() {
4065 // Restore the stack and pop off the arguments
// 5 slots total: receiver (1) + two long arguments (2 slots each).
4066 _sp+=5;
4067 Node *newVal = pop_pair();
4068 Node *oldVal = pop_pair();
4069 Node *obj = pop();
4070 
4071 // we need the offset of the "value" field which was computed when
4072 // inlining the get() method. Give up if we don't have it.
4073 if (value_field_offset == -1)
4074 return false;
4075 
4076 // Null check without removing any arguments.
4077 _sp+=5;
4078 obj = do_null_check(obj, T_OBJECT);
4079 _sp-=5;
4080 // Check for locking null object
// Path already dead (receiver provably null) — intrinsic is complete.
4081 if (stopped()) return true;
4082 
// Address of obj.value and its alias category, so the conditional store is
// wired into the correct memory slice.
4083 Node *adr = basic_plus_adr(obj, obj, value_field_offset);
4084 const TypePtr *adr_type = _gvn.type(adr)->is_ptr();
4085 int alias_idx = C->get_alias_index(adr_type);
4086 
// Conditional store plus its memory projection; bol tests whether the store
// succeeded (BoolTest::eq on the node's flag output).
4087 Node *cas = _gvn.transform(new (C, 5) StoreLConditionalNode(control(), memory(alias_idx), adr, newVal, oldVal));
4088 Node *store_proj = _gvn.transform( new (C, 1) SCMemProjNode(cas));
4089 set_memory(store_proj, alias_idx);
4090 Node *bol = _gvn.transform( new (C, 2) BoolNode( cas, BoolTest::eq ) );
4091 
4092 Node *result;
4093 // CMove node is not used to be able fold a possible check code
4094 // after attemptUpdate() call. This code could be transformed
4095 // into CMove node by loop optimizations.
// Build an explicit if-diamond (Region + Phi) merging intcon(1) on the
// success path and intcon(0) on the failure path.
4096 {
4097 RegionNode *r = new (C, 3) RegionNode(3);
4098 result = new (C, 3) PhiNode(r, TypeInt::BOOL);
4099 
4100 Node *iff = create_and_xform_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
4101 Node *iftrue = opt_iff(r, iff);
4102 r->init_req(1, iftrue);
4103 result->init_req(1, intcon(1));
4104 result->init_req(2, intcon(0));
4105 
4106 set_control(_gvn.transform(r));
4107 record_for_igvn(r);
4108 
4109 C->set_has_split_ifs(true); // Has chance for split-if optimization
4110 }
4111 
// Boolean result occupies a single stack slot.
4112 push(_gvn.transform(result));
4113 return true;
4114 }
4115
4116 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) { 3997 bool LibraryCallKit::inline_fp_conversions(vmIntrinsics::ID id) {
4117 // restore the arguments 3998 // restore the arguments
4118 _sp += arg_size(); 3999 _sp += arg_size();
4119 4000
4120 switch (id) { 4001 switch (id) {