Mercurial > hg > truffle
annotate src/os_cpu/bsd_zero/vm/atomic_bsd_zero.inline.hpp @ 8804:91bf0bdae37b
8008217: CDS: Class data sharing limits the malloc heap on Solaris
Summary: In 64bit VM move CDS archive address to 32G on all platforms using new flag SharedBaseAddress. In 32bit VM set CDS archive address to 3Gb on Linux and let other OSs pick the address.
Reviewed-by: kvn, dcubed, zgu, hseigel
author | coleenp |
---|---|
date | Wed, 20 Mar 2013 08:04:54 -0400 |
parents | 63e54c37ac64 |
children |
rev | line source |
---|---|
3960 | 1 /* |
8675 (changeset 63e54c37ac64 — "8008959: Fix non-PCH build on Linux, Windows and MacOS X", simonis; parent: 3960) |
2 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. |
3960 | 3 * Copyright 2007, 2008, 2011 Red Hat, Inc. |
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
5 * | |
6 * This code is free software; you can redistribute it and/or modify it | |
7 * under the terms of the GNU General Public License version 2 only, as | |
8 * published by the Free Software Foundation. | |
9 * | |
10 * This code is distributed in the hope that it will be useful, but WITHOUT | |
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
13 * version 2 for more details (a copy is included in the LICENSE file that | |
14 * accompanied this code). | |
15 * | |
16 * You should have received a copy of the GNU General Public License version | |
17 * 2 along with this work; if not, write to the Free Software Foundation, | |
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
19 * | |
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |
21 * or visit www.oracle.com if you need additional information or have any | |
22 * questions. | |
23 * | |
24 */ | |
25 | |
26 #ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP | |
27 #define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP | |
28 | |
29 #include "runtime/atomic.hpp" | |
30 #include "runtime/os.hpp" | |
31 #include "vm_version_zero.hpp" | |
32 | |
33 // Implementation of class atomic | |
34 | |
35 #ifdef M68K | |
36 | |
/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Returns newval on success and oldval if no exchange happened.
 * This implementation is processor specific and works on
 * 68020 68030 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
 * instruction.
 * Using a kernelhelper would be better for arch complete implementation.
 *
 * NOTE(review): per the m68k CAS instruction semantics (compare *ptr with
 * the Dc register; on match store Du into *ptr leaving Dc unchanged, on
 * mismatch load *ptr into Dc), `ret` should come back equal to oldval on
 * success and equal to the observed *ptr on failure -- which contradicts
 * the "returns newval on success" statement above.  Confirm against the
 * M68000 PRM before relying on either reading.
 */

// Emits "cas.l ret,newval,(ptr)".  %0 (ret) is tied to oldval on entry via
// the "0" constraint; "+m" marks *ptr as read and written.
static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}
58 | |
/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
   `*PTR' before the operation.*/
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
    // Cheap pre-check: bail out without a CAS if the value already differs.
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    // NOTE(review): m68k CAS leaves the compare register equal to oldval
    // (== prev here) on success, so this `== newval` test only holds when
    // oldval == newval.  The sibling m68k_lock_test_and_set below tests
    // `== prev` instead.  One of the two must be wrong -- verify against
    // the __m68k_cmpxchg contract; `== prev` looks like the intended test.
    if (__m68k_cmpxchg (prev, newval, ptr) == newval)
      // Success.
      return prev;

    // We failed even though prev == oldval.  Try again.
  }
}
77 | |
/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until success.

    int prev = *ptr;

    // NOTE(review): if __m68k_cmpxchg returns the pre-CAS value (prev) on
    // success -- as m68k CAS semantics suggest -- this `== prev + add_value`
    // test never succeeds except when add_value == 0; `== prev` may have
    // been intended.  Confirm before use on real M68K hardware.
    if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
      return prev + add_value;
  }
}
89 | |
/* Atomically write VALUE into `*PTR' and returns the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    // Success when the CAS confirms *ptr still held prev, meaning newval
    // was stored; prev is the value that was replaced.
    if (__m68k_cmpxchg (prev, newval, ptr) == prev)
      return prev;
  }
}
101 #endif // M68K | |
102 | |
103 #ifdef ARM | |
104 | |
/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */

// Function-pointer type for the kernel-provided user-space cmpxchg helper.
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
// NOTE(review): 0xffff0fc0 is the Linux ARM "kuser helper" cmpxchg entry in
// the kernel-mapped vector page; confirm the BSD kernels targeted by this
// bsd_zero port expose the same helper at the same address.
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
117 | |
118 | |
119 | |
120 /* Perform an atomic compare and swap: if the current value of `*PTR' | |
121 is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of | |
122 `*PTR' before the operation.*/ | |
123 static inline int arm_compare_and_swap(volatile int *ptr, | |
124 int oldval, | |
125 int newval) { | |
126 for (;;) { | |
127 int prev = *ptr; | |
128 if (prev != oldval) | |
129 return prev; | |
130 | |
131 if (__kernel_cmpxchg (prev, newval, ptr) == 0) | |
132 // Success. | |
133 return prev; | |
134 | |
135 // We failed even though prev == oldval. Try again. | |
136 } | |
137 } | |
138 | |
139 /* Atomically add an int to memory. */ | |
140 static inline int arm_add_and_fetch(volatile int *ptr, int add_value) { | |
141 for (;;) { | |
142 // Loop until a __kernel_cmpxchg succeeds. | |
143 | |
144 int prev = *ptr; | |
145 | |
146 if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0) | |
147 return prev + add_value; | |
148 } | |
149 } | |
150 | |
151 /* Atomically write VALUE into `*PTR' and returns the previous | |
152 contents of `*PTR'. */ | |
153 static inline int arm_lock_test_and_set(volatile int *ptr, int newval) { | |
154 for (;;) { | |
155 // Loop until a __kernel_cmpxchg succeeds. | |
156 int prev = *ptr; | |
157 | |
158 if (__kernel_cmpxchg (prev, newval, ptr) == 0) | |
159 return prev; | |
160 } | |
161 } | |
162 #endif // ARM | |
163 | |
// Store a 32-bit value.  On platforms other than ARM and M68K a full
// memory barrier (__sync_synchronize) is issued before the store; the
// plain 32-bit store itself is assumed to be single-copy atomic.
// NOTE(review): no barrier follows the store, so it is not prevented
// from reordering with later accesses -- confirm callers only need the
// leading fence.
inline void Atomic::store(jint store_value, volatile jint* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}
170 | |
// Store a pointer-width value.  Same barrier policy as the jint store
// above: a full fence before the store, none after, except on ARM/M68K
// where no fence is emitted at all.
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}
177 | |
178 inline jint Atomic::add(jint add_value, volatile jint* dest) { | |
179 #ifdef ARM | |
180 return arm_add_and_fetch(dest, add_value); | |
181 #else | |
182 #ifdef M68K | |
183 return m68k_add_and_fetch(dest, add_value); | |
184 #else | |
185 return __sync_add_and_fetch(dest, add_value); | |
186 #endif // M68K | |
187 #endif // ARM | |
188 } | |
189 | |
190 inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) { | |
191 #ifdef ARM | |
192 return arm_add_and_fetch(dest, add_value); | |
193 #else | |
194 #ifdef M68K | |
195 return m68k_add_and_fetch(dest, add_value); | |
196 #else | |
197 return __sync_add_and_fetch(dest, add_value); | |
198 #endif // M68K | |
199 #endif // ARM | |
200 } | |
201 | |
202 inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) { | |
203 return (void *) add_ptr(add_value, (volatile intptr_t *) dest); | |
204 } | |
205 | |
// Atomically increment a 32-bit value.
inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

// Atomically increment a pointer-width value.
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

// Atomically increment a pointer-width value through an untyped pointer.
inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

// Atomically decrement a 32-bit value.
inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

// Atomically decrement a pointer-width value.
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

// Atomically decrement a pointer-width value through an untyped pointer.
inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}
229 | |
230 inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) { | |
231 #ifdef ARM | |
232 return arm_lock_test_and_set(dest, exchange_value); | |
233 #else | |
234 #ifdef M68K | |
235 return m68k_lock_test_and_set(dest, exchange_value); | |
236 #else | |
237 // __sync_lock_test_and_set is a bizarrely named atomic exchange | |
238 // operation. Note that some platforms only support this with the | |
239 // limitation that the only valid value to store is the immediate | |
240 // constant 1. There is a test for this in JNI_CreateJavaVM(). | |
241 return __sync_lock_test_and_set (dest, exchange_value); | |
242 #endif // M68K | |
243 #endif // ARM | |
244 } | |
245 | |
246 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, | |
247 volatile intptr_t* dest) { | |
248 #ifdef ARM | |
249 return arm_lock_test_and_set(dest, exchange_value); | |
250 #else | |
251 #ifdef M68K | |
252 return m68k_lock_test_and_set(dest, exchange_value); | |
253 #else | |
254 return __sync_lock_test_and_set (dest, exchange_value); | |
255 #endif // M68K | |
256 #endif // ARM | |
257 } | |
258 | |
259 inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) { | |
260 return (void *) xchg_ptr((intptr_t) exchange_value, | |
261 (volatile intptr_t*) dest); | |
262 } | |
263 | |
264 inline jint Atomic::cmpxchg(jint exchange_value, | |
265 volatile jint* dest, | |
266 jint compare_value) { | |
267 #ifdef ARM | |
268 return arm_compare_and_swap(dest, compare_value, exchange_value); | |
269 #else | |
270 #ifdef M68K | |
271 return m68k_compare_and_swap(dest, compare_value, exchange_value); | |
272 #else | |
273 return __sync_val_compare_and_swap(dest, compare_value, exchange_value); | |
274 #endif // M68K | |
275 #endif // ARM | |
276 } | |
277 | |
// 64-bit compare-and-swap.  Unconditionally uses the GCC builtin, even on
// ARM/M68K where the 32-bit operations above go through platform helpers.
// NOTE(review): on 32-bit targets __sync_val_compare_and_swap on a jlong
// needs 8-byte atomics support from the toolchain/CPU -- confirm this is
// satisfied on every platform that builds bsd_zero.
inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value) {

  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
284 | |
285 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, | |
286 volatile intptr_t* dest, | |
287 intptr_t compare_value) { | |
288 #ifdef ARM | |
289 return arm_compare_and_swap(dest, compare_value, exchange_value); | |
290 #else | |
291 #ifdef M68K | |
292 return m68k_compare_and_swap(dest, compare_value, exchange_value); | |
293 #else | |
294 return __sync_val_compare_and_swap(dest, compare_value, exchange_value); | |
295 #endif // M68K | |
296 #endif // ARM | |
297 } | |
298 | |
299 inline void* Atomic::cmpxchg_ptr(void* exchange_value, | |
300 volatile void* dest, | |
301 void* compare_value) { | |
302 | |
303 return (void *) cmpxchg_ptr((intptr_t) exchange_value, | |
304 (volatile intptr_t*) dest, | |
305 (intptr_t) compare_value); | |
306 } | |
307 | |
// 64-bit load.  Delegates to os::atomic_copy64, which presumably provides
// an atomic (non-tearing) 64-bit copy on 32-bit platforms -- see os.hpp
// for the per-platform implementation.
inline jlong Atomic::load(volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}
313 | |
// 64-bit store to a non-volatile destination.  Uses os::atomic_copy64,
// which presumably performs the copy atomically on 32-bit platforms where
// a plain jlong store could tear -- verify against os.hpp.
inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

// 64-bit store to a volatile destination; same delegation as above.
inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}
321 | |
322 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_INLINE_HPP |