comparison src/share/vm/runtime/park.cpp @ 14909:4ca6dc0799b6

Backout jdk9 merge
author Gilles Duboscq <duboscq@ssw.jku.at>
date Tue, 01 Apr 2014 13:57:07 +0200
parents 2c95095271e9
children 52b4284cb496
--- a/src/share/vm/runtime/park.cpp	(14908:8db6e76cb658)
+++ b/src/share/vm/runtime/park.cpp	(14909:4ca6dc0799b6)
@@ -57,26 +57,62 @@
   // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
   ParkEvent * ev ;

   // Start by trying to recycle an existing but unassociated
   // ParkEvent from the global free list.
-  // Using a spin lock since we are part of the mutex impl.
-  // 8028280: using concurrent free list without memory management can leak
-  // pretty badly it turns out.
-  Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
-  {
-    ev = FreeList;
-    if (ev != NULL) {
-      FreeList = ev->FreeNext;
-    }
-  }
-  Thread::SpinRelease(&ListLock);
+  for (;;) {
+    ev = FreeList ;
+    if (ev == NULL) break ;
+    // 1: Detach - sequester or privatize the list
+    // Tantamount to ev = Swap (&FreeList, NULL)
+    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
+      continue ;
+    }
+
+    // We've detached the list. The list in-hand is now
+    // local to this thread. This thread can operate on the
+    // list without risk of interference from other threads.
+    // 2: Extract -- pop the 1st element from the list.
+    ParkEvent * List = ev->FreeNext ;
+    if (List == NULL) break ;
+    for (;;) {
+      // 3: Try to reattach the residual list
+      guarantee (List != NULL, "invariant") ;
+      ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
+      if (Arv == NULL) break ;
+
+      // New nodes arrived. Try to detach the recent arrivals.
+      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
+        continue ;
+      }
+      guarantee (Arv != NULL, "invariant") ;
+      // 4: Merge Arv into List
+      ParkEvent * Tail = List ;
+      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
+      Tail->FreeNext = Arv ;
+    }
+    break ;
+  }

   if (ev != NULL) {
     guarantee (ev->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new ParkEvent.
+    // In rare cases an allocating thread might detach a long list --
+    // installing null into FreeList -- and then stall or be obstructed.
+    // A 2nd thread calling Allocate() would see FreeList == null.
+    // The list held privately by the 1st thread is unavailable to the 2nd thread.
+    // In that case the 2nd thread would have to materialize a new ParkEvent,
+    // even though free ParkEvents existed in the system. In this case we end up
+    // with more ParkEvents in circulation than we need, but the race is
+    // rare and the outcome is benign. Ideally, the # of extant ParkEvents
+    // is equal to the maximum # of threads that existed at any one time.
+    // Because of the race mentioned above, segments of the freelist
+    // can be transiently inaccessible. At worst we may end up with the
+    // # of ParkEvents in circulation slightly above the ideal.
+    // Note that if we didn't have the TSM/immortal constraint, then
+    // when reattaching, above, we could trim the list.
     ev = new ParkEvent () ;
     guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
   }
   ev->reset() ; // courtesy to caller
   ev->AssociatedWith = t ; // Associate ev with t
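
The allocation path restored on the + side is a detach/extract/reattach discipline rather than a plain pop: privatize the whole free list with one CAS, keep the head element, then try to publish the remainder, folding in any nodes that arrived concurrently. A minimal self-contained sketch of the same four steps using C++11 atomics; Node, free_list, and allocate_node are illustrative stand-ins, not park.cpp names, and, like the original, it is only safe because nodes are assumed immortal/type-stable so detached segments can be traversed freely:

#include <atomic>

struct Node { Node* next; };

std::atomic<Node*> free_list{nullptr};

// Pop one node via detach (1), extract (2), reattach (3), merge (4).
// Returns nullptr when the list is empty and the caller must allocate.
Node* allocate_node() {
  for (;;) {
    Node* head = free_list.load(std::memory_order_acquire);
    if (head == nullptr) return nullptr;
    // 1: Detach -- privatize the entire list (tantamount to a swap with null).
    if (!free_list.compare_exchange_weak(head, nullptr)) continue;
    // The detached list is now local to this thread.
    // 2: Extract -- keep the first element for ourselves.
    Node* rest = head->next;
    if (rest == nullptr) return head;
    for (;;) {
      // 3: Try to reattach the residual list to a still-empty free_list.
      Node* expected = nullptr;
      if (free_list.compare_exchange_strong(expected, rest)) return head;
      // New nodes arrived meanwhile; try to detach just those arrivals.
      Node* arrivals = expected;
      if (!free_list.compare_exchange_strong(arrivals, nullptr)) continue;
      // 4: Merge the arrivals onto the tail of our private list, retry 3.
      Node* tail = rest;
      while (tail->next != nullptr) tail = tail->next;
      tail->next = expected;
    }
  }
}

Relative to a plain pop (CAS of head to head->next), detaching everything sidesteps the classic ABA hazard on the head pointer; the cost is that a stalled holder briefly hides the rest of the list, which is exactly the benign race the restored comment block above describes.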
@@ -86,18 +122,17 @@

 void ParkEvent::Release (ParkEvent * ev) {
   if (ev == NULL) return ;
   guarantee (ev->FreeNext == NULL , "invariant") ;
   ev->AssociatedWith = NULL ;
-  // Note that if we didn't have the TSM/immortal constraint, then
-  // when reattaching we could trim the list.
-  Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
-  {
-    ev->FreeNext = FreeList;
-    FreeList = ev;
-  }
-  Thread::SpinRelease(&ListLock);
+  for (;;) {
+    // Push ev onto FreeList
+    // The mechanism is "half" lock-free.
+    ParkEvent * List = FreeList ;
+    ev->FreeNext = List ;
+    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+  }
 }

 // Override operator new and delete so we can ensure that the
 // least significant byte of ParkEvent addresses is 0.
 // Beware that excessive address alignment is undesirable
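
The restored release path is the matching single-CAS push, a Treiber-stack push; "half" lock-free in the original comment's wording, since releasing threads never privatize the list. Note that the - side removed here had replaced both paths with the ListLock spin lock, citing 8028280 ("using concurrent free list without memory management can leak pretty badly"). A sketch of the push, continuing the illustrative names from the previous sketch:

// Push a node back with a single CAS on the head (Treiber-stack push).
// The caller must pass a node with next == nullptr, mirroring the
// guarantee (ev->FreeNext == NULL) check in Release above.
void release_node(Node* n) {
  Node* head = free_list.load(std::memory_order_relaxed);
  for (;;) {
    n->next = head;
    if (free_list.compare_exchange_weak(head, n, std::memory_order_release,
                                        std::memory_order_relaxed)) {
      return;
    }
    // head was refreshed by the failed CAS; just retry.
  }
}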
@@ -115,11 +150,11 @@
 }


 // 6399321 As a temporary measure we copied & modified the ParkEvent::
 // allocate() and release() code for use by Parkers. The Parker:: forms
-// will eventually be removed as we consolidate and shift over to ParkEvents
+// will eventually be removed as we consolide and shift over to ParkEvents
 // for both builtin synchronization and JSR166 operations.

 volatile int Parker::ListLock = 0 ;
 Parker * volatile Parker::FreeList = NULL ;

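
The context above also mentions overriding operator new and delete so the least significant byte of every ParkEvent address is zero, which is what the guarantee ((intptr_t(ev) & 0xFF) == 0) in Allocate checks. The actual override is elided from this hunk; the following is only an illustrative approximation of such an override using the over-allocate-and-round-up trick, with AlignedEvent as a hypothetical stand-in:

#include <cstdint>
#include <cstdlib>
#include <new>

// Hand out addresses whose low 8 bits are zero by over-allocating and
// rounding up. The raw base pointer is not kept, so instances are
// effectively immortal -- consistent with the TSM/immortal constraint
// the comments above rely on.
struct AlignedEvent {
  void* operator new(std::size_t sz) {
    void* raw = std::malloc(sz + 256);
    if (raw == nullptr) throw std::bad_alloc();
    return reinterpret_cast<void*>(
        (reinterpret_cast<std::intptr_t>(raw) + 256) & ~std::intptr_t(0xFF));
  }
  void operator delete(void*) {} // storage is never returned
};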
@@ -127,25 +162,60 @@
   guarantee (t != NULL, "invariant") ;
   Parker * p ;

   // Start by trying to recycle an existing but unassociated
   // Parker from the global free list.
-  // 8028280: using concurrent free list without memory management can leak
-  // pretty badly it turns out.
-  Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
-  {
-    p = FreeList;
-    if (p != NULL) {
-      FreeList = p->FreeNext;
-    }
-  }
-  Thread::SpinRelease(&ListLock);
+  for (;;) {
+    p = FreeList ;
+    if (p == NULL) break ;
+    // 1: Detach
+    // Tantamount to p = Swap (&FreeList, NULL)
+    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
+      continue ;
+    }
+
+    // We've detached the list. The list in-hand is now
+    // local to this thread. This thread can operate on the
+    // list without risk of interference from other threads.
+    // 2: Extract -- pop the 1st element from the list.
+    Parker * List = p->FreeNext ;
+    if (List == NULL) break ;
+    for (;;) {
+      // 3: Try to reattach the residual list
+      guarantee (List != NULL, "invariant") ;
+      Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
+      if (Arv == NULL) break ;
+
+      // New nodes arrived. Try to detach the recent arrivals.
+      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
+        continue ;
+      }
+      guarantee (Arv != NULL, "invariant") ;
+      // 4: Merge Arv into List
+      Parker * Tail = List ;
+      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
+      Tail->FreeNext = Arv ;
+    }
+    break ;
+  }

   if (p != NULL) {
     guarantee (p->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new Parker..
+    // In rare cases an allocating thread might detach
+    // a long list -- installing null into FreeList --and
+    // then stall. Another thread calling Allocate() would see
+    // FreeList == null and then invoke the ctor. In this case we
+    // end up with more Parkers in circulation than we need, but
+    // the race is rare and the outcome is benign.
+    // Ideally, the # of extant Parkers is equal to the
+    // maximum # of threads that existed at any one time.
+    // Because of the race mentioned above, segments of the
+    // freelist can be transiently inaccessible. At worst
+    // we may end up with the # of Parkers in circulation
+    // slightly above the ideal.
     p = new Parker() ;
   }
   p->AssociatedWith = t ; // Associate p with t
   p->FreeNext = NULL ;
   return p ;
@@ -155,14 +225,13 @@
 void Parker::Release (Parker * p) {
   if (p == NULL) return ;
   guarantee (p->AssociatedWith != NULL, "invariant") ;
   guarantee (p->FreeNext == NULL , "invariant") ;
   p->AssociatedWith = NULL ;
-
-  Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
-  {
-    p->FreeNext = FreeList;
-    FreeList = p;
-  }
-  Thread::SpinRelease(&ListLock);
+  for (;;) {
+    // Push p onto FreeList
+    Parker * List = FreeList ;
+    p->FreeNext = List ;
+    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
+  }
 }

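
For orientation, the allocate/release pair restored by this backout behaves as follows from a caller's perspective, sketched with the illustrative helpers from the snippets above rather than the park.cpp API:

int main() {
  // Recycle from the free list, else materialize a new node the hard way.
  Node* n = allocate_node();
  if (n == nullptr) n = new Node{nullptr};
  // ... associate n with the current thread and use it ...
  n->next = nullptr;   // invariant required before release
  release_node(n);
  return 0;
}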