src/share/vm/opto/library_call.cpp @ 14429:2113136690bc
comparison of 14427:eb178e97560c with 14429:2113136690bc

8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering
Summary: Add a field to C2 LoadNode and StoreNode classes which indicates
whether the load/store should do an acquire/release on platforms which
support it.
Reviewed-by: kvn

author    goetz
date      Fri, 15 Nov 2013 11:05:32 -0800
parents   a57a165b8296
children  50fdb38839eb
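The hunks below thread a new MemNode::MemOrd argument through make_load,
store_to_memory, and the node factories. For orientation, the enum presumably
has roughly this shape (a sketch inferred from the call sites in this file,
not quoted from memnode.hpp):

    // Inferred shape of the ordering enum this patch adds to MemNode
    // (names match the call sites below; exact values assumed):
    enum MemOrd {
      unordered = 0,  // no ordering demanded; the compiler may reorder freely
      acquire,        // load must complete before later memory accesses
      release         // store must wait for earlier memory accesses
    };

On strongly ordered platforms the acquire/release flavors cost nothing extra;
the point of the patch is that a weakly ordered backend (e.g. PPC64) can emit
the required fences only where a node actually demands them.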
@@ -1055,11 +1055,11 @@
 Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
   ciKlass* thread_klass = env()->Thread_klass();
   const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
   Node* thread = _gvn.transform(new (C) ThreadLocalNode());
   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
-  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT);
+  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
   tls_output = thread;
   return threadObj;
 }
 
 
@@ -2638,11 +2638,11 @@
   // of safe & unsafe memory. Otherwise fails in a CTW of rt.jar
   // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
 
   if (!is_store) {
-    Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile);
+    Node* p = make_load(control(), adr, value_type, type, adr_type, MemNode::unordered, is_volatile);
     // load value
     switch (type) {
     case T_BOOLEAN:
     case T_CHAR:
     case T_BYTE:
@@ -2682,33 +2682,34 @@
     val = ConvL2X(val);
     val = _gvn.transform(new (C) CastX2PNode(val));
     break;
   }
 
+  MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
   if (type != T_OBJECT ) {
-    (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
+    (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
   } else {
     // Possibly an oop being stored to Java heap or native memory
     if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
       // oop to Java heap.
-      (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
+      (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
     } else {
       // We can't tell at compile time if we are storing in the Java heap or outside
       // of it. So we need to emit code to conditionally do the proper type of
       // store.
 
       IdealKit ideal(this);
 #define __ ideal.
       // QQQ who knows what probability is here??
       __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
         // Sync IdealKit and graphKit.
         sync_kit(ideal);
-        Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
+        Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
         // Update IdealKit memory.
         __ sync_kit(this);
       } __ else_(); {
-        __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile);
+        __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);
       } __ end_if();
       // Final sync IdealKit and GraphKit.
       final_sync(ideal);
 #undef __
     }
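The `mo` selection in the store path above is the heart of the change: a
volatile Unsafe store becomes a release store, everything else stays
unordered. A standalone C++11 analogy of the two orderings (illustration
only; C2 ideal nodes are not std::atomic):

    #include <atomic>

    std::atomic<int> guard(0);
    int payload = 0;

    void unordered_store(int v) {
      // MemNode::unordered: no ordering demanded; compiler and CPU
      // may move this store past neighboring memory accesses.
      guard.store(v, std::memory_order_relaxed);
    }

    void release_store(int v) {
      payload = v;
      // MemNode::release: the payload store must be visible before
      // guard changes; PPC64 pays a fence here, x86 gets it for free.
      guard.store(1, std::memory_order_release);
    }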
@@ -2977,16 +2978,16 @@
 #ifdef _LP64
   if (adr->bottom_type()->is_ptr_to_narrowoop()) {
     Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
     if (kind == LS_xchg) {
       load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
                                                          newval_enc, adr_type, value_type->make_narrowoop()));
     } else {
       assert(kind == LS_cmpxchg, "wrong LoadStore operation");
       Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
       load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
                                                               newval_enc, oldval_enc));
     }
   } else
 #endif
   {
     if (kind == LS_xchg) {
@@ -3088,13 +3089,13 @@
   insert_mem_bar(Op_MemBarCPUOrder);
   // Ensure that the store is atomic for longs:
   const bool require_atomic_access = true;
   Node* store;
   if (type == T_OBJECT) // reference stores need a store barrier.
-    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type);
+    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
   else {
-    store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access);
+    store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
   }
   insert_mem_bar(Op_MemBarCPUOrder);
   return true;
 }
 
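Note the hunk keeps `require_atomic_access` alongside the new ordering
argument; the two are orthogonal. The flag is about tearing, not ordering:
on a 32-bit VM a plain jlong store may be split into two word stores. A C++
analogy of the distinction (hypothetical example, not HotSpot code):

    #include <atomic>
    #include <cstdint>

    int64_t plain_slot = 0;               // a 32-bit target may write this in two halves
    std::atomic<int64_t> atomic_slot(0);  // a single, indivisible 64-bit store

    void store_long(int64_t v) {
      plain_slot = v;                                   // readers may see a torn value
      atomic_slot.store(v, std::memory_order_relaxed);  // never torn -- the property
                                                        // require_atomic_access requests
    }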
@@ -3150,11 +3151,11 @@
     // Serializable.class or Object[].class. The runtime will handle it.
     // But we must make an explicit check for initialization.
     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
     // can generate code to load it as unsigned byte.
-    Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
+    Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
     Node* bits = intcon(InstanceKlass::fully_initialized);
     test = _gvn.transform(new (C) SubINode(inst, bits));
     // The 'test' is non-zero if we need to take a slow path.
   }
 
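The initialization test in this hunk is a byte load plus a subtraction; in
scalar form (hypothetical stand-ins for the InstanceKlass field and constant):

    #include <cstdint>

    const int fully_initialized = 4;  // stand-in for InstanceKlass::fully_initialized

    // The field is loaded as T_BOOLEAN/UBYTE so the zero-extended byte
    // subtracts cleanly; non-zero means "take the slow path".
    bool needs_slow_path(const uint8_t* init_state_addr) {
      return (*init_state_addr - fully_initialized) != 0;
    }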
@@ -3174,35 +3175,35 @@
   Node* cls = null_check(argument(1), T_OBJECT);
   Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
   kls = null_check(kls, T_OBJECT);
   ByteSize offset = TRACE_ID_OFFSET;
   Node* insp = basic_plus_adr(kls, in_bytes(offset));
-  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG);
+  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
   Node* bits = longcon(~0x03l); // ignore bit 0 & 1
   Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits));
   Node* clsused = longcon(0x01l); // set the class bit
   Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused));
 
   const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
-  store_to_memory(control(), insp, orl, T_LONG, adr_type);
+  store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
   set_result(andl);
   return true;
 }
 
 bool LibraryCallKit::inline_native_threadID() {
   Node* tls_ptr = NULL;
   Node* cur_thr = generate_current_thread(tls_ptr);
   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
-  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
+  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));
 
   Node* threadid = NULL;
   size_t thread_id_size = OSThread::thread_id_size();
   if (thread_id_size == (size_t) BytesPerLong) {
-    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG));
+    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG, MemNode::unordered));
   } else if (thread_id_size == (size_t) BytesPerInt) {
-    threadid = make_load(control(), p, TypeInt::INT, T_INT);
+    threadid = make_load(control(), p, TypeInt::INT, T_INT, MemNode::unordered);
   } else {
     ShouldNotReachHere();
   }
   set_result(threadid);
   return true;
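inline_native_threadID adapts to the OS thread-id width, which is a
compile-time platform constant; a scalar sketch of the same dispatch
(stand-in types, hypothetical helper):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Java-visible ids are 32-bit; a 64-bit OS id is truncated (the ConvL2I above).
    int32_t load_os_thread_id(const void* p, size_t thread_id_size) {
      if (thread_id_size == sizeof(int64_t)) {
        int64_t id;
        std::memcpy(&id, p, sizeof(id));
        return (int32_t)id;
      }
      assert(thread_id_size == sizeof(int32_t) && "unexpected thread id width");
      int32_t id;
      std::memcpy(&id, p, sizeof(id));
      return id;
    }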
@@ -3273,15 +3274,15 @@
 
   generate_slow_guard(bol_thr, slow_region);
 
   // (b) Interrupt bit on TLS must be false.
   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
-  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
+  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
 
   // Set the control input on the field _interrupted read to prevent it floating up.
-  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
+  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
   Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
   Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));
 
   IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
 
@@ -3345,11 +3346,11 @@
 
 //---------------------------load_mirror_from_klass----------------------------
 // Given a klass oop, load its java mirror (a java.lang.Class oop).
 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
-  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
+  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
 }
 
 //-----------------------load_klass_from_mirror_common-------------------------
 // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
 // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
@@ -3382,11 +3383,11 @@
 // Fall through if (mods & mask) == bits, take the guard otherwise.
 Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
   // Branch around if the given klass has the given modifier bit set.
   // Like generate_guard, adds a new path onto the region.
   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
-  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
+  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
   Node* mask = intcon(modifier_mask);
   Node* bits = intcon(modifier_bits);
   Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
   Node* cmp = _gvn.transform(new (C) CmpINode(mbit, bits));
   Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
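generate_access_flags_guard boils down to a mask-and-compare; the branch
condition it assembles, as a plain function (hypothetical helper for
illustration):

    // Fall through when (mods & mask) == bits; take the guard edge
    // otherwise (hence BoolTest::ne above).
    bool take_guard(int mods, int modifier_mask, int modifier_bits) {
      return (mods & modifier_mask) != modifier_bits;
    }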
@@ -3499,11 +3500,11 @@
     query_value = gen_instanceof(obj, kls, safe_for_replace);
     break;
 
   case vmIntrinsics::_getModifiers:
     p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
-    query_value = make_load(NULL, p, TypeInt::INT, T_INT);
+    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
     break;
 
   case vmIntrinsics::_isInterface:
     // (To verify this code sequence, check the asserts in JVM_IsInterface.)
     if (generate_interface_guard(kls, region) != NULL)
@@ -3557,19 +3558,19 @@
   case vmIntrinsics::_getComponentType:
     if (generate_array_guard(kls, region) != NULL) {
       // Be sure to pin the oop load to the guard edge just created:
       Node* is_array_ctrl = region->in(region->req()-1);
       Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
-      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT);
+      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
       phi->add_req(cmo);
     }
     query_value = null();  // non-array case is null
     break;
 
   case vmIntrinsics::_getClassAccessFlags:
     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
-    query_value = make_load(NULL, p, TypeInt::INT, T_INT);
+    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
     break;
 
   default:
     fatal_unexpected_iid(id);
     break;
@@ -3931,11 +3932,11 @@
   // Get the Method* out of the appropriate vtable entry.
   int entry_offset = (InstanceKlass::vtable_start_offset() +
                       vtable_index*vtableEntry::size()) * wordSize +
                       vtableEntry::method_offset_in_bytes();
   Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
-  Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS);
+  Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
 
   // Compare the target method with the expected method (e.g., Object.hashCode).
   const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
 
   Node* native_call = makecon(native_call_addr);
@@ -4057,11 +4058,11 @@
     generate_virtual_guard(obj_klass, slow_region);
   }
 
   // Get the header out of the object, use LoadMarkNode when available
   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
-  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type());
+  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
 
   // Test the header to see if it is unlocked.
   Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
   Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask));
   Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
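The hashCode fast path only trusts an unlocked, unbiased header. The test
being assembled above, in scalar form (the constants are stand-ins; the
actual values live in markOopDesc):

    #include <cstdint>

    const uintptr_t biased_lock_mask_in_place = 0x7;  // assumed low lock bits
    const uintptr_t unlocked_value            = 0x1;  // assumed "unlocked" pattern

    bool header_is_unlocked(uintptr_t header) {
      return (header & biased_lock_mask_in_place) == unlocked_value;
    }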
@@ -5478,11 +5479,11 @@
     start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear)));
     if (bump_bit != 0) {
       // Store a zero to the immediately preceding jint:
       Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
       Node* p1 = basic_plus_adr(dest, x1);
-      mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
+      mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
       mem = _gvn.transform(mem);
     }
   }
   Node* end = dest_size; // pre-rounded
   mem = ClearArrayNode::clear_memory(control(), mem, dest,
@@ -5528,12 +5529,12 @@
     // This is a common case, since abase can be odd mod 8.
     if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
         ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
       Node* sptr = basic_plus_adr(src, src_off);
       Node* dptr = basic_plus_adr(dest, dest_off);
-      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type);
-      store_to_memory(control(), dptr, sval, T_INT, adr_type);
+      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
+      store_to_memory(control(), dptr, sval, T_INT, adr_type, MemNode::unordered);
       src_off += BytesPerInt;
       dest_off += BytesPerInt;
     } else {
       return false;
     }
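The offset test above deserves a worked example: it accepts offset pairs that
are jint-aligned, misaligned from 8 the same way, and therefore fixable with
a single jint copy (sketch with concrete numbers; BytesPerInt = 4,
BytesPerLong = 8):

    // src_off = 12, dest_off = 20:
    //   (12 | 20) & 7 == 4  -> both are 4 mod 8 (int-aligned, not long-aligned)
    //   (12 ^ 20) & 7 == 0  -> both share the same misalignment
    // One jint copy moves them to 16 and 24, both 0 mod 8, so the main
    // loop can run with aligned 8-byte transfers.
    void align_for_long_copy(int& src_off, int& dest_off) {
      if (((src_off | dest_off) & 7) == 4 && ((src_off ^ dest_off) & 7) == 0) {
        src_off  += 4;   // the make_load/store_to_memory pair above
        dest_off += 4;   // copies exactly this one jint
      }
    }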
@@ -5594,11 +5595,11 @@
     // for the target array. This is an optimistic check. It will
     // look in each non-null element's class, at the desired klass's
     // super_check_offset, for the desired klass.
     int sco_offset = in_bytes(Klass::super_check_offset_offset());
     Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
-    Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
+    Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
     Node* check_offset = ConvI2X(_gvn.transform(n3));
     Node* check_value = dest_elem_klass;
 
     Node* src_start = array_element_address(src, src_offset, T_OBJECT);
     Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
@@ -5735,11 +5736,11 @@
   result = _gvn.transform(new (C) AndINode(result, intcon(0xFF)));
 
   Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
   Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
   Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
-  result = make_load(control(), adr, TypeInt::INT, T_INT);
+  result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
 
   crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
   result = _gvn.transform(new (C) XorINode(crc, result));
   result = _gvn.transform(new (C) XorINode(result, M1));
   set_result(result);
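The graph in this hunk is the standard table-driven CRC-32 single-byte
update. Its scalar equivalent (same algorithm; `table` stands in for
StubRoutines::crc_table_addr(), and the two inversions correspond to the
XorINode-with-M1 on entry and exit):

    #include <cstdint>

    uint32_t crc32_update_byte(uint32_t crc, uint8_t b, const uint32_t table[256]) {
      crc ^= 0xFFFFFFFFu;                           // XOR with M1 on entry
      crc  = table[(crc ^ b) & 0xFF] ^ (crc >> 8);  // the load at base + (index << 2)
      return crc ^ 0xFFFFFFFFu;                     // XOR with M1 on exit
    }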
@@ -5836,11 +5837,11 @@
 
   ciInstanceKlass* klass = env()->Object_klass();
   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
 
   Node* no_ctrl = NULL;
-  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT);
+  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
 
   // Use the pre-barrier to record the value in the referent field
   pre_barrier(false /* do_load */,
               control(),
               NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
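The pre_barrier call above (with do_load = false and the loaded value passed
explicitly) exists for concurrent collectors: under G1's
snapshot-at-the-beginning marking, returning a Reference's referent without
logging it could hide a live object from an in-progress mark. Conceptually
(hypothetical hook names, not the real barrier code):

    // Stand-ins for the SATB queue machinery:
    bool satb_marking_active();
    void satb_enqueue(void* obj);

    void reference_get_pre_barrier(void* referent) {
      if (satb_marking_active() && referent != nullptr) {
        satb_enqueue(referent);  // log the value so concurrent marking
      }                          // still treats the referent as live
    }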
@@ -5883,11 +5884,11 @@
 
   // Build the resultant type of the load
   const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass());
 
   // Build the load.
-  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, is_vol);
+  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol);
   return loadedField;
 }
 
 
 //------------------------------inline_aescrypt_Block-----------------------