comparison: src/share/vm/runtime/park.cpp @ 18041:52b4284cb496
summary:    Merge with jdk8u20-b26
author:     Gilles Duboscq <duboscq@ssw.jku.at>
date:       Wed, 15 Oct 2014 16:02:50 +0200
parents:    4ca6dc0799b6
children:   (none)
--- src/share/vm/runtime/park.cpp   17606:45d7b2c7029d
+++ src/share/vm/runtime/park.cpp   18041:52b4284cb496
@@ -57,62 +57,26 @@
   // In rare cases -- JVM_RawMonitor* operations -- we can find t == null.
   ParkEvent * ev ;

   // Start by trying to recycle an existing but unassociated
   // ParkEvent from the global free list.
-  for (;;) {
-    ev = FreeList ;
-    if (ev == NULL) break ;
-    // 1: Detach - sequester or privatize the list
-    // Tantamount to ev = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
-       continue ;
-    }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.  This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    ParkEvent * List = ev->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-      // 3: Try to reattach the residual list
-      guarantee (List != NULL, "invariant") ;
-      ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-      if (Arv == NULL) break ;
-
-      // New nodes arrived.  Try to detach the recent arrivals.
-      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-        continue ;
-      }
-      guarantee (Arv != NULL, "invariant") ;
-      // 4: Merge Arv into List
-      ParkEvent * Tail = List ;
-      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-      Tail->FreeNext = Arv ;
-    }
-    break ;
-  }
+  // Using a spin lock since we are part of the mutex impl.
+  // 8028280: using concurrent free list without memory management can leak
+  // pretty badly it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
+  {
+    ev = FreeList;
+    if (ev != NULL) {
+      FreeList = ev->FreeNext;
+    }
+  }
+  Thread::SpinRelease(&ListLock);

   if (ev != NULL) {
     guarantee (ev->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new ParkEvent.
-    // In rare cases an allocating thread might detach a long list --
-    // installing null into FreeList -- and then stall or be obstructed.
-    // A 2nd thread calling Allocate() would see FreeList == null.
-    // The list held privately by the 1st thread is unavailable to the 2nd thread.
-    // In that case the 2nd thread would have to materialize a new ParkEvent,
-    // even though free ParkEvents existed in the system.  In this case we end up
-    // with more ParkEvents in circulation than we need, but the race is
-    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
-    // is equal to the maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the freelist
-    // can be transiently inaccessible.  At worst we may end up with the
-    // # of ParkEvents in circulation slightly above the ideal.
-    // Note that if we didn't have the TSM/immortal constraint, then
-    // when reattaching, above, we could trim the list.
     ev = new ParkEvent () ;
     guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
   }
   ev->reset() ;                     // courtesy to caller
   ev->AssociatedWith = t ;          // Associate ev with t
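
The numbered comments in the removed block describe a detach/extract/reattach discipline that is easy to lose in diff form. Below is a minimal, self-contained sketch of that old lock-free pop, with std::atomic standing in for Atomic::cmpxchg_ptr; Node, g_free_list, and pop_detach_reattach are hypothetical names for illustration, not HotSpot API.

#include <atomic>

struct Node { Node * FreeNext; };

static std::atomic<Node *> g_free_list{nullptr};   // hypothetical stand-in for FreeList

// Sketch of the removed lock-free pop.  The steps mirror the numbered
// comments: 1 detach the whole list, 2 keep the first node for the
// caller, 3 reattach the remainder, 4 splice in any nodes pushed while
// the list was held privately, then retry the reattach.
static Node * pop_detach_reattach() {
  Node * head = g_free_list.exchange(nullptr);          // 1: detach
  if (head == nullptr) return nullptr;
  Node * rest = head->FreeNext;                         // 2: extract
  while (rest != nullptr) {
    Node * expected = nullptr;
    if (g_free_list.compare_exchange_strong(expected, rest)) {
      break;                                            // 3: reattach succeeded
    }
    // 4: new arrivals appeared; detach them and append to our residue.
    Node * arrivals = g_free_list.exchange(nullptr);
    Node * tail = rest;
    while (tail->FreeNext != nullptr) tail = tail->FreeNext;
    tail->FreeNext = arrivals;
  }
  return head;
}

As the removed comments note, a thread stalled between steps 1 and 3 makes the entire residual list invisible to other allocators, which then construct fresh events even though free ones exist; that transient invisibility is why the population of events could drift above the ideal.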
@@ -122,17 +86,18 @@

 void ParkEvent::Release (ParkEvent * ev) {
   if (ev == NULL) return ;
   guarantee (ev->FreeNext == NULL , "invariant") ;
   ev->AssociatedWith = NULL ;
-  for (;;) {
-    // Push ev onto FreeList
-    // The mechanism is "half" lock-free.
-    ParkEvent * List = FreeList ;
-    ev->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
-  }
+  // Note that if we didn't have the TSM/immortal constraint, then
+  // when reattaching we could trim the list.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
+  {
+    ev->FreeNext = FreeList;
+    FreeList = ev;
+  }
+  Thread::SpinRelease(&ListLock);
 }

 // Override operator new and delete so we can ensure that the
 // least significant byte of ParkEvent addresses is 0.
 // Beware that excessive address alignment is undesirable
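
The context lines above cut off mid-comment, but the intent is visible in the earlier guarantee, (intptr_t(ev) & 0xFF) == 0: ParkEvent addresses are kept 256-byte aligned so the low byte is always zero. A minimal sketch of one way to meet that guarantee, by over-allocating and rounding up; allocate_aligned_256 and the use of std::malloc are assumptions for illustration, not the VM's internal heap allocator.

#include <cstdint>
#include <cstdlib>

// Over-allocate by the alignment, then round the pointer up to the
// next 256-byte boundary, so (addr & 0xFF) == 0 holds for the result.
// Sketch only: the rounded pointer cannot be handed back to free(),
// which fits treating these objects as type-stable and immortal.
static void * allocate_aligned_256(std::size_t sz) {
  void * raw = std::malloc(sz + 256);
  if (raw == nullptr) return nullptr;
  return (void *) ((intptr_t(raw) + 256) & ~intptr_t(0xFF));
}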
@@ -162,60 +127,25 @@
   guarantee (t != NULL, "invariant") ;
   Parker * p ;

   // Start by trying to recycle an existing but unassociated
   // Parker from the global free list.
-  for (;;) {
-    p = FreeList ;
-    if (p == NULL) break ;
-    // 1: Detach
-    // Tantamount to p = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
-       continue ;
-    }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.  This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    Parker * List = p->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-      // 3: Try to reattach the residual list
-      guarantee (List != NULL, "invariant") ;
-      Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-      if (Arv == NULL) break ;
-
-      // New nodes arrived.  Try to detach the recent arrivals.
-      if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-        continue ;
-      }
-      guarantee (Arv != NULL, "invariant") ;
-      // 4: Merge Arv into List
-      Parker * Tail = List ;
-      while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-      Tail->FreeNext = Arv ;
-    }
-    break ;
-  }
+  // 8028280: using concurrent free list without memory management can leak
+  // pretty badly it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
+  {
+    p = FreeList;
+    if (p != NULL) {
+      FreeList = p->FreeNext;
+    }
+  }
+  Thread::SpinRelease(&ListLock);

   if (p != NULL) {
     guarantee (p->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new Parker..
-    // In rare cases an allocating thread might detach
-    // a long list -- installing null into FreeList -- and
-    // then stall.  Another thread calling Allocate() would see
-    // FreeList == null and then invoke the ctor.  In this case we
-    // end up with more Parkers in circulation than we need, but
-    // the race is rare and the outcome is benign.
-    // Ideally, the # of extant Parkers is equal to the
-    // maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the
-    // freelist can be transiently inaccessible.  At worst
-    // we may end up with the # of Parkers in circulation
-    // slightly above the ideal.
     p = new Parker() ;
   }
   p->AssociatedWith = t ;          // Associate p with t
   p->FreeNext = NULL ;
   return p ;
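
Both Allocate paths now reduce to the same shape: the entire pop is one critical section under ListLock, so a node is either on the list or owned by exactly one thread, and no sublist is ever held privately off-list. A minimal sketch of that replacement pattern, with std::mutex standing in for Thread::SpinAcquire / Thread::SpinRelease and hypothetical Node, g_free_list, and g_list_lock names.

#include <mutex>

struct Node { Node * FreeNext; };

static Node *     g_free_list = nullptr;   // hypothetical stand-in for FreeList
static std::mutex g_list_lock;             // stands in for ListLock

// Pop one node, or return nullptr when the list is empty.  Loading the
// head and unlinking it happen under one lock, so no other thread can
// observe a half-detached node or find the list spuriously empty.
static Node * pop_locked() {
  std::lock_guard<std::mutex> guard(g_list_lock);
  Node * n = g_free_list;
  if (n != nullptr) {
    g_free_list = n->FreeNext;
  }
  return n;
}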
@@ -225,13 +155,14 @@
 void Parker::Release (Parker * p) {
   if (p == NULL) return ;
   guarantee (p->AssociatedWith != NULL, "invariant") ;
   guarantee (p->FreeNext == NULL , "invariant") ;
   p->AssociatedWith = NULL ;
-  for (;;) {
-    // Push p onto FreeList
-    Parker * List = FreeList ;
-    p->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
-  }
+
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
+  {
+    p->FreeNext = FreeList;
+    FreeList = p;
+  }
+  Thread::SpinRelease(&ListLock);
 }

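For comparison, both Release paths dropped a classic Treiber-style push, which ParkEvent::Release called "half" lock-free: pushes raced only against other pushes and against the detach-all pop. A sketch of that removed mechanism, again with hypothetical names and std::atomic in place of Atomic::cmpxchg_ptr.

#include <atomic>

struct Node { Node * FreeNext; };

static std::atomic<Node *> g_free_list{nullptr};   // hypothetical stand-in for FreeList

// Link the node to the current head, then try to swing the head to the
// node.  compare_exchange_weak refreshes 'head' on failure, so the loop
// simply relinks and retries until the push lands.
static void push_cas(Node * n) {
  Node * head = g_free_list.load();
  do {
    n->FreeNext = head;
  } while (!g_free_list.compare_exchange_weak(head, n));
}

The new code trades this for the same spin lock used on the allocation side; once both ends of the list sit under one lock, the bookkeeping races described in the removed comments disappear.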