comparison src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp @ 1010:354d3184f6b2

6890308: integrate zero assembler hotspot changes
Reviewed-by: never
Contributed-by: gbenson@redhat.com
author never
date Tue, 13 Oct 2009 12:04:21 -0700
parents 1009:03b336640699
children c18cbe5936b8
/*
 * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
 * Copyright 2007, 2008 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Implementation of class atomic

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Returns newval on success and oldval if no exchange happened.
 * This implementation is processor specific and works on
 * 68020 68030 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
 * instruction.
 * Using a kernel helper would be better for a complete implementation
 * across architectures.
 *
 */

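// A note on the asm statement below (reading the GCC operand constraints):
// %0 is the CAS compare operand, a data register seeded with oldval via the
// "0" matching constraint and read back as the result; %1 ("+m") is the
// memory operand that may be updated; %2 ("d") holds the replacement value.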
static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
   `*PTR' before the operation. */
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__m68k_cmpxchg (prev, newval, ptr) == newval)
      // Success.
      return prev;

    // We failed even though prev == oldval. Try again.
  }
}

/* Atomically add an int to memory. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until success.

    int prev = *ptr;

    if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
      return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until success.
    int prev = *ptr;

    if (__m68k_cmpxchg (prev, newval, ptr) == prev)
      return prev;
  }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
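// 0xffff0fc0 is the fixed address of the cmpxchg entry in the Linux/ARM
// "kuser helper" page that the kernel maps into every user process; calling
// through it lets the kernel supply whatever compare-and-swap sequence is
// appropriate for the CPU the process is running on.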

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'. Return the contents of
   `*PTR' before the operation. */
static inline int arm_compare_and_swap(volatile int *ptr,
                                       int oldval,
                                       int newval) {
  for (;;) {
    int prev = *ptr;
    if (prev != oldval)
      return prev;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      // Success.
      return prev;

    // We failed even though prev == oldval. Try again.
  }
}

/* Atomically add an int to memory. */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.

    int prev = *ptr;

    if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
      return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
    // Loop until a __kernel_cmpxchg succeeds.
    int prev = *ptr;

    if (__kernel_cmpxchg (prev, newval, ptr) == 0)
      return prev;
  }
}
#endif // ARM

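// store and store_ptr are plain stores: an aligned jint- or pointer-sized
// store is assumed to be atomic here, and no memory ordering is implied.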
inline void Atomic::store(jint store_value, volatile jint* dest) {
  *dest = store_value;
}

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
  *dest = store_value;
}

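// The read-modify-write operations below dispatch on the target: ARM and
// M68K use the helpers defined above, everything else goes straight to the
// GCC __sync builtins (full-barrier atomics; __sync_add_and_fetch returns
// the updated value).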
inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation. Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1. There is a test for this in JNI_CreateJavaVM().
  return __sync_lock_test_and_set (dest, exchange_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  return __sync_lock_test_and_set (dest, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}
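// For illustration: callers typically build other atomic updates out of
// cmpxchg with a retry loop. A hypothetical increment-and-fetch on a
// volatile jint* counter, written against this interface, might look like:
//
//   jint observed = *counter;
//   for (;;) {
//     jint prev = Atomic::cmpxchg(observed + 1, counter, observed);
//     if (prev == observed)
//       break;            // our value was installed
//     observed = prev;    // lost the race; retry with the freshly read value
//   }
//   // observed + 1 is now the value this thread installed.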

inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value) {
  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value);
}