comparison src/share/vm/opto/library_call.cpp @ 605:98cb887364d3

6810672: Comment typos
Summary: I have collected some typos I have found while looking at the code.
Reviewed-by: kvn, never
author twisti
date Fri, 27 Feb 2009 13:27:09 -0800
parents a1980da045cc
children c771b7f43bbf
comparing 604:ec59443af135 with 605:98cb887364d3
@@ -990,11 +990,11 @@
 
   _sp += 2;
   Node *argument = pop(); // pop non-receiver first: it was pushed second
   Node *receiver = pop();
 
-  // don't intrinsify is argument isn't a constant string.
+  // don't intrinsify if argument isn't a constant string.
   if (!argument->is_Con()) {
     return false;
   }
   const TypeOopPtr* str_type = _gvn.type(argument)->isa_oopptr();
   if (str_type == NULL) {
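The guard in this hunk is what makes the string intrinsic worthwhile: only a compile-time-constant argument lets the generated code be specialized against the pattern instead of re-examining it on every call. A minimal sketch of that idea in plain C++, with kNeedle as a hypothetical stand-in for the constant string:

    #include <cstring>

    // Hypothetical illustration: with the needle known at compile time,
    // its length and bytes fold into the generated code, so the check
    // reduces to one bounds test and one memcmp.
    static bool starts_with_const(const char* s, std::size_t len) {
      static const char kNeedle[] = "abc";           // the known constant
      const std::size_t kLen = sizeof(kNeedle) - 1;  // folded at compile time
      return len >= kLen && std::memcmp(s, kNeedle, kLen) == 0;
    }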
@@ -1265,11 +1265,11 @@
   // }
   // } else {
   //   result = DPow(x,y);
   // }
   // if (result != result)? {
-  //   ucommon_trap();
+  //   uncommon_trap();
   // }
   // return result;
 
   _sp += arg_size();  // restore stack pointer
   Node* y = pop_math_arg();
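The result != result test in the pseudocode is the standard NaN check, since NaN is the only double that compares unequal to itself. A hedged sketch of the same control flow in plain C++ (slow_pow is a hypothetical stand-in for the deoptimized path the intrinsic reaches via uncommon_trap):

    #include <cmath>

    double slow_pow(double x, double y);  // hypothetical fallback path

    double fast_pow(double x, double y) {
      double result = std::pow(x, y);
      if (result != result) {   // true only when result is NaN
        return slow_pow(x, y);  // the intrinsic would deoptimize here
      }
      return result;
    }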
@@ -1322,11 +1322,11 @@
   // Check (double)((int) y) : y
   Node *cmpinty= _gvn.transform(new (C, 3) CmpDNode(doubleinty, y));
   // Check if (y isn't int) then go to slow path
 
   Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) );
-  // Branch eith way
+  // Branch either way
   IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN);
   Node *slow_path = opt_iff(r,if2); // Set region path 2
 
   // Calculate DPow(abs(x), y)*(1 & (int)y)
   // Node for constant 1
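Two pieces of arithmetic in this hunk deserve unpacking: (double)((int) y) == y is the fast-path test for y being an exact integer (the only case where a negative base is well-defined), and DPow(abs(x), y) with the low bit of (int) y recovers the sign, since |x|^y must be negated exactly when y is odd. A standalone sketch, assuming y fits in an int:

    #include <cmath>

    // Mirror of the comment's DPow(abs(x), y) sign trick, outside C2.
    double pow_negative_base(double x, double y) {
      if ((double)(int)y != y) {
        return std::nan("");  // non-integer y, negative x: NaN (slow path)
      }
      double r = std::pow(std::fabs(x), y);
      // The low bit of (int)y is 1 iff y is odd; odd powers of a
      // negative base are negative.
      return (((int)y) & 1) ? -r : r;
    }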
@@ -1713,12 +1713,12 @@
     return basic_plus_adr(base, offset);
   }
 }
 
 //----------------------------inline_reverseBytes_int/long-------------------
-// inline Int.reverseBytes(int)
-// inline Long.reverseByes(long)
+// inline Integer.reverseBytes(int)
+// inline Long.reverseBytes(long)
 bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) {
   assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes");
   if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false;
   if (id == vmIntrinsics::_reverseBytes_l && !Matcher::has_match_rule(Op_ReverseBytesL)) return false;
   _sp += arg_size();  // restore stack pointer
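Matcher::has_match_rule above keeps the intrinsic off platforms that lack a native byte-swap instruction (x86's bswap, for example). The operation Op_ReverseBytesI stands for is plain shift-and-mask byte reversal; a portable sketch:

    #include <cstdint>

    // Portable equivalent of Integer.reverseBytes(int): swap the four
    // bytes of a 32-bit value.
    uint32_t reverse_bytes_i(uint32_t v) {
      return ((v & 0x000000FFu) << 24) |
             ((v & 0x0000FF00u) <<  8) |
             ((v & 0x00FF0000u) >>  8) |
             ((v & 0xFF000000u) >> 24);
    }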
@@ -1913,11 +1913,11 @@
   if (is_volatile) {
     // We need to emit leading and trailing CPU membars (see below) in
     // addition to memory membars when is_volatile. This is a little
     // too strong, but avoids the need to insert per-alias-type
     // volatile membars (for stores; compare Parse::do_put_xxx), which
-    // we cannot do effctively here because we probably only have a
+    // we cannot do effectively here because we probably only have a
     // rough approximation of type.
     need_mem_bar = true;
     // For Stores, place a memory ordering barrier now.
     if (is_store)
       insert_mem_bar(Op_MemBarRelease);
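The barrier discipline in this hunk (a MemBarRelease ahead of a volatile store, with trailing barriers emitted later in the function) corresponds roughly to what C++11 atomics provide. A sketch of the ordering being preserved, under that analogy rather than as HotSpot code:

    #include <atomic>

    std::atomic<int> g_field;  // stand-in for a Java volatile field

    void volatile_store(int v) {
      // Release ordering keeps earlier accesses from sinking below the
      // store; the seq_cst default also supplies the trailing fence the
      // comment alludes to.
      g_field.store(v, std::memory_order_seq_cst);
    }

    int volatile_load() {
      return g_field.load(std::memory_order_seq_cst);
    }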
@@ -2097,11 +2097,11 @@
   // This basic scheme here is the same as inline_unsafe_access, but
   // differs in enough details that combining them would make the code
   // overly confusing. (This is a true fact! I originally combined
   // them, but even I was confused by it!) As much code/comments as
   // possible are retained from inline_unsafe_access though to make
-  // the correspondances clearer. - dl
+  // the correspondences clearer. - dl
 
   if (callee()->is_static()) return false;  // caller must have the capability!
 
 #ifndef PRODUCT
   {
@@ -2164,11 +2164,11 @@
   Compile::AliasType* alias_type = C->alias_type(adr_type);
   assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
   int alias_idx = C->get_alias_index(adr_type);
 
   // Memory-model-wise, a CAS acts like a little synchronized block,
-  // so needs barriers on each side.  These don't't translate into
+  // so needs barriers on each side.  These don't translate into
   // actual barriers on most machines, but we still need rest of
   // compiler to respect ordering.
 
   insert_mem_bar(Op_MemBarRelease);
   insert_mem_bar(Op_MemBarCPUOrder);
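The "little synchronized block" framing above means the CAS must not let surrounding accesses cross it in either direction, which is what the release barrier before it (and acquire-side barriers after it) enforce. In C++11 terms that is a sequentially consistent compare-exchange; a sketch under that analogy:

    #include <atomic>

    std::atomic<long> g_slot;  // stand-in for the CAS target field

    // compareAndSwap-style operation: ordered on both sides, so neither
    // the compiler nor the CPU may move neighboring accesses across it.
    bool cas_long(long expected, long desired) {
      return g_slot.compare_exchange_strong(expected, desired,
                                            std::memory_order_seq_cst);
    }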
@@ -3206,11 +3206,11 @@
   // vm: see markOop.hpp.
   Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask);
   Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift);
   Node *hshifted_header= _gvn.transform( new (C, 3) URShiftXNode(header, hash_shift) );
   // This hack lets the hash bits live anywhere in the mark object now, as long
-  // as the shift drops the relevent bits into the low 32 bits.  Note that
+  // as the shift drops the relevant bits into the low 32 bits.  Note that
   // Java spec says that HashCode is an int so there's no point in capturing
   // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
   hshifted_header = ConvX2I(hshifted_header);
   Node *hash_val = _gvn.transform( new (C, 3) AndINode(hshifted_header, hash_mask) );
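The node sequence above is just (int)(header >> hash_shift) & hash_mask on the machine-word-sized mark. As plain integer arithmetic, with placeholder constants (the real values are defined in markOop.hpp):

    #include <cstdint>

    const int kHashShift = 7;              // placeholder; see markOop.hpp
    const int kHashMask  = (1 << 25) - 1;  // placeholder; see markOop.hpp

    // URShiftX + ConvX2I + AndI from the hunk above, in scalar form.
    int hash_from_header(intptr_t header) {
      return (int)((uintptr_t)header >> kHashShift) & kHashMask;
    }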
@@ -3253,11 +3253,11 @@
 
   return true;
 }
 
 //---------------------------inline_native_getClass----------------------------
-// Build special case code for calls to hashCode on an object.
+// Build special case code for calls to getClass on an object.
 bool LibraryCallKit::inline_native_getClass() {
   Node* obj = null_check_receiver(callee());
   if (stopped())  return true;
   push( load_mirror_from_klass(load_object_klass(obj)) );
   return true;
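inline_native_getClass lowers obj.getClass() into a null check followed by two dependent loads: the klass out of the object header, then the java.lang.Class mirror out of the klass. A conceptual sketch with hypothetical layout types, not VM code:

    void throw_null_pointer_exception();   // hypothetical helper

    struct Klass { void* java_mirror; };   // hypothetical layout
    struct Oop   { Klass* klass; };

    void* get_class(Oop* obj) {
      if (obj == nullptr) {
        throw_null_pointer_exception();    // null_check_receiver above
        return nullptr;
      }
      return obj->klass->java_mirror;      // klass load, then mirror load
    }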
@@ -4592,11 +4592,11 @@
     original_dest->set_req(0, control());
     _gvn.hash_find_insert(original_dest);  // put back into GVN table
   }
 
   // The memory edges above are precise in order to model effects around
-  // array copyies accurately to allow value numbering of field loads around
+  // array copies accurately to allow value numbering of field loads around
   // arraycopy.  Such field loads, both before and after, are common in Java
   // collections and similar classes involving header/array data structures.
   //
   // But with low number of register or when some registers are used or killed
   // by arraycopy calls it causes registers spilling on stack. See 6544710.
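The value-numbering payoff described here: a field load before the arraycopy and the identical load after it collapse into one when the copy's memory effect provably leaves that field alone. A loose C++ analogue of the Java collections pattern the comment mentions:

    #include <cstring>

    struct IntList {
      int  size;   // header field, disjoint from element storage
      int* elems;
    };

    // If the compiler can prove memcpy writes only fresh[0..n), the
    // second list->size load is redundant and folds into the first,
    // which is the value numbering the precise memory edges enable.
    int copy_out(const IntList* list, int* fresh) {
      int n = list->size;                                // load before copy
      std::memcpy(fresh, list->elems, n * sizeof(int));
      return n + list->size;                             // load after copy
    }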