comparison src/share/vm/opto/memnode.cpp @ 14909:4ca6dc0799b6

Backout jdk9 merge
author Gilles Duboscq <duboscq@ssw.jku.at>
date Tue, 01 Apr 2014 13:57:07 +0200
parents 45467c53f178
children 89152779163c
comparison of 14908:8db6e76cb658 (before, shown as '-' lines) with 14909:4ca6dc0799b6 (after, shown as '+' lines)
@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -655,11 +655,11 @@
          "must stay in the original alias category");
   // The type of the address must be contained in the adr_type,
   // disregarding "null"-ness.
   // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
   const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
-  assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
+  assert(cross_check->meet(tp_notnull) == cross_check,
          "real address must not escape from expected memory type");
   }
 #endif
   return tp;
 }
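The assertion above leans on C2's type lattice: meet() only moves a type toward BOTTOM, so if the not-null address type is already contained in the expected memory type, meeting the two hands back the expected type unchanged. The 14908 side first strips the speculative component that the jdk9 merge attached to pointer types before comparing. A sketch of the two comparisons, reusing only names visible in this hunk (not standalone code; tp and cross_check come from the enclosing function):

    const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();  // ignore null-ness
    // 14908 (pre-backout): compare against the expected type with its
    // speculative part removed.
    assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
           "real address must not escape from expected memory type");
    // 14909 (after the backout): plain lattice containment check.
    assert(cross_check->meet(tp_notnull) == cross_check,
           "real address must not escape from expected memory type");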
@@ -905,11 +905,11 @@
 }
 #endif

 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
-Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) {
+Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
   Compile* C = gvn.C;

   // sanity check the alias category against the created node type
   assert(!(adr_type->isa_oopptr() &&
            adr_type->offset() == oopDesc::klass_offset_in_bytes()),
@@ -921,38 +921,38 @@
   assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
           // oop will be recorded in oop map if load crosses safepoint
           rt->isa_oopptr() || is_immutable_value(adr),
           "raw memory operations should have control edge");
   switch (bt) {
-  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
-  case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
-  case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo);
-  case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo);
-  case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo);
-  case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo);
-  case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt, mo);
-  case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt, mo);
-  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo);
+  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int() );
+  case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int() );
+  case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int() );
+  case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int() );
+  case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int() );
+  case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long() );
+  case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt );
+  case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt );
+  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr() );
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
-      Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo));
+      Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop()));
       return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
     } else
 #endif
     {
       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
-      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo);
+      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
     }
   }
   ShouldNotReachHere();
   return (LoadNode*)NULL;
 }

-LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
+LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt) {
   bool require_atomic = true;
-  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
+  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic);
 }



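The hunk above is the core of the backout for loads: LoadNode::make and LoadLNode::make_atomic lose the trailing MemOrd argument, and every Load*Node constructor call drops mo. For illustration only, a hypothetical call site creating a plain int load would change shape as follows; gvn, ctl, mem, adr and adr_type are placeholders, and TypeInt::INT is an assumed result type rather than something taken from this hunk:

    // 14908 (pre-backout): the caller states an explicit memory ordering.
    Node* ld_old = LoadNode::make(gvn, ctl, mem, adr, adr_type,
                                  TypeInt::INT, T_INT, MemNode::unordered);
    // 14909 (after the backout): no MemOrd parameter; ordering for volatile
    // accesses is expressed with separate memory-barrier nodes instead.
    Node* ld_new = LoadNode::make(gvn, ctl, mem, adr, adr_type,
                                  TypeInt::INT, T_INT);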
@@ -1000,17 +1000,13 @@
   // through any kind of MemBar but normal loads shouldn't skip
   // through MemBarAcquire since the could allow them to move out of
   // a synchronized region.
   while (current->is_Proj()) {
     int opc = current->in(0)->Opcode();
-    if ((final && (opc == Op_MemBarAcquire ||
-                   opc == Op_MemBarAcquireLock ||
-                   opc == Op_LoadFence)) ||
-        opc == Op_MemBarRelease ||
-        opc == Op_StoreFence ||
-        opc == Op_MemBarReleaseLock ||
-        opc == Op_MemBarCPUOrder) {
+    if ((final && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock)) ||
+        opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder ||
+        opc == Op_MemBarReleaseLock) {
       Node* mem = current->in(0)->in(TypeFunc::Memory);
       if (mem->is_MergeMem()) {
         MergeMemNode* merge = mem->as_MergeMem();
         Node* new_st = merge->memory_at(alias_idx);
         if (new_st == merge->base_memory()) {
@@ -1683,11 +1679,11 @@
       && (_type->isa_vect() == NULL)
       && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
     // t might actually be lower than _type, if _type is a unique
     // concrete subclass of abstract class t.
     if (off_beyond_header) { // is the offset beyond the header?
-      const Type* jt = t->join_speculative(_type);
+      const Type* jt = t->join(_type);
       // In any case, do not allow the join, per se, to empty out the type.
       if (jt->empty() && !t->empty()) {
         // This can happen if a interface-typed array narrows to a class type.
         jt = _type;
       }
@@ -2034,16 +2030,16 @@
   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
   assert(adr_type != NULL, "expecting TypeKlassPtr");
 #ifdef _LP64
   if (adr_type->is_ptr_to_narrowklass()) {
     assert(UseCompressedClassPointers, "no compressed klasses");
-    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
+    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
     return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
   }
 #endif
   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
-  return new (C) LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
+  return new (C) LoadKlassNode(ctl, mem, adr, at, tk);
 }

 //------------------------------Value------------------------------------------
 const Type *LoadKlassNode::Value( PhaseTransform *phase ) const {
   return klass_value_common(phase);
@@ -2354,50 +2350,49 @@
 }

 //=============================================================================
 //---------------------------StoreNode::make-----------------------------------
 // Polymorphic factory method:
-StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
-  assert((mo == unordered || mo == release), "unexpected");
+StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
   Compile* C = gvn.C;
-  assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+  assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
          ctl != NULL, "raw memory operations should have control edge");

   switch (bt) {
   case T_BOOLEAN:
-  case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val, mo);
-  case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val, mo);
+  case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val);
+  case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val);
   case T_CHAR:
-  case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val, mo);
-  case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo);
-  case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val, mo);
-  case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo);
+  case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val);
+  case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val);
+  case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val);
+  case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val);
   case T_METADATA:
   case T_ADDRESS:
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
-      return new (C) StoreNNode(ctl, mem, adr, adr_type, val, mo);
+      return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                 adr->bottom_type()->isa_rawptr())) {
       val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
-      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
+      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
     }
 #endif
     {
-      return new (C) StorePNode(ctl, mem, adr, adr_type, val, mo);
+      return new (C) StorePNode(ctl, mem, adr, adr_type, val);
     }
   }
   ShouldNotReachHere();
   return (StoreNode*)NULL;
 }

-StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
+StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val) {
   bool require_atomic = true;
-  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
+  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic);
 }


 //--------------------------bottom_type----------------------------------------
 const Type *StoreNode::bottom_type() const {
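As with the load factory, StoreNode::make and StoreLNode::make_atomic lose their MemOrd parameter in the hunk above, together with the 14908-side assert that restricted stores to the unordered and release orderings. A hypothetical call site, for illustration only (gvn, ctl, mem, adr, adr_type and val are placeholders):

    // 14908 (pre-backout): e.g. a releasing store for a volatile int field.
    StoreNode* st_old = StoreNode::make(gvn, ctl, mem, adr, adr_type,
                                        val, T_INT, MemNode::release);
    // 14909 (after the backout): no ordering argument; release semantics for
    // volatile stores come from explicit memory-barrier nodes placed around
    // the store rather than from the store node itself.
    StoreNode* st_new = StoreNode::make(gvn, ctl, mem, adr, adr_type,
                                        val, T_INT);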
@@ -2786,16 +2781,16 @@
   if( adr->Opcode() != Op_AddP ) Unimplemented();
   Node *base = adr->in(1);

   Node *zero = phase->makecon(TypeLong::ZERO);
   Node *off = phase->MakeConX(BytesPerLong);
-  mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
+  mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
   count--;
   while( count-- ) {
     mem = phase->transform(mem);
     adr = phase->transform(new (phase->C) AddPNode(base,adr,off));
-    mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
+    mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
   }
   return mem;
 }

 //----------------------------step_through----------------------------------
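The zero-fill loop above builds its stores directly rather than through StoreNode::make, so the backout drops two trailing constructor arguments: the MemNode::unordered ordering and a final false which, judging from StoreLNode::make_atomic earlier in this diff, is the require_atomic flag (the zeroing stores need no atomicity guarantee). Schematically, with the argument roles spelled out as comments (labels only, not code from this file):

    // 14908 (pre-backout): (ctl, mem, adr, adr_type, value, ordering, require_atomic)
    mem = new (phase->C) StoreLNode(in(0), mem, adr, atp, zero,
                                    MemNode::unordered, false);
    // 14909 (after the backout): (ctl, mem, adr, adr_type, value); no ordering
    // argument, and the atomic-access flag is only passed where it is needed.
    mem = new (phase->C) StoreLNode(in(0), mem, adr, atp, zero);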
@@ -2835,11 +2830,11 @@
   int unit = BytesPerLong;
   if ((offset % unit) != 0) {
     Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
     mem = phase->transform(mem);
     offset += BytesPerInt;
   }
   assert((offset % unit) == 0, "");

@@ -2896,11 +2891,11 @@
   }
   if (done_offset < end_offset) { // emit the final 32-bit store
     Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
     mem = phase->transform(mem);
     done_offset += BytesPerInt;
   }
   assert(done_offset == end_offset, "");
   return mem;
@@ -2980,21 +2975,19 @@
 }

 //------------------------------make-------------------------------------------
 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
   switch (opcode) {
   case Op_MemBarAcquire: return new(C) MemBarAcquireNode(C, atp, pn);
-  case Op_LoadFence: return new(C) LoadFenceNode(C, atp, pn);
   case Op_MemBarRelease: return new(C) MemBarReleaseNode(C, atp, pn);
-  case Op_StoreFence: return new(C) StoreFenceNode(C, atp, pn);
   case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn);
   case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn);
   case Op_MemBarVolatile: return new(C) MemBarVolatileNode(C, atp, pn);
   case Op_MemBarCPUOrder: return new(C) MemBarCPUOrderNode(C, atp, pn);
   case Op_Initialize: return new(C) InitializeNode(C, atp, pn);
   case Op_MemBarStoreStore: return new(C) MemBarStoreStoreNode(C, atp, pn);
   default: ShouldNotReachHere(); return NULL;
   }
 }

 //------------------------------Ideal------------------------------------------
 // Return a node which is more "ideal" than the current node. Strip out
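MemBarNode::make maps a barrier opcode to the matching node class; the backout removes the Op_LoadFence and Op_StoreFence cases (and their LoadFenceNode/StoreFenceNode targets) that the jdk9 merge had brought in. A hypothetical use of the factory that works on both sides of the change; the alias index and the NULL precedent argument are placeholders, not values taken from this hunk:

    // Create an acquire barrier node (illustrative arguments).
    MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire, Compile::AliasIdxBot, NULL);
    // On the 14908 side the factory also accepted Op_LoadFence and
    // Op_StoreFence; any opcode without a case ends up in the default
    // branch and trips ShouldNotReachHere().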
@@ -3772,26 +3765,26 @@
   int nst = 0;
   if (!split) {
     ++new_long;
     off[nst] = offset;
     st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                phase->longcon(con), T_LONG, MemNode::unordered);
+                                phase->longcon(con), T_LONG);
   } else {
     // Omit either if it is a zero.
     if (con0 != 0) {
       ++new_int;
       off[nst] = offset;
       st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                  phase->intcon(con0), T_INT, MemNode::unordered);
+                                  phase->intcon(con0), T_INT);
     }
     if (con1 != 0) {
       ++new_int;
       offset += BytesPerInt;
       adr = make_raw_address(offset, phase);
       off[nst] = offset;
       st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                  phase->intcon(con1), T_INT, MemNode::unordered);
+                                  phase->intcon(con1), T_INT);
     }
   }

   // Insert second store first, then the first before the second.
   // Insert each one just before any overlapping non-constant stores.