src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author   duke
date     Sat, 01 Dec 2007 00:00:00 +0000
children d0994e5bebce
/*
 * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Implementation of class OrderAccess.

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

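// x86 is a strongly ordered architecture: loads are not reordered with other
// loads, stores are not reordered with other stores, and stores are not
// reordered with earlier loads.  The only hardware reordering is of a store
// with a subsequent load.  Hence loadload(), storestore() and loadstore()
// above only need to restrain the compiler (acquire() and release() emit no
// barrier instruction), while storeload() requires a real fence().
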
inline void OrderAccess::acquire() {
  volatile intptr_t dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (dummy) : : "memory");
#endif // AMD64
}
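
// The dummy load from the stack pointer is practically free (the top of the
// stack is hot in cache); its purpose, together with the "memory" clobber, is
// to keep the compiler from hoisting later memory accesses above this point.
// No barrier instruction is needed because x86 loads already have acquire
// semantics in hardware.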

inline void OrderAccess::release() {
  dummy = 0;
}
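
// Note: 'dummy' here is the static OrderAccess::dummy member declared in the
// shared orderAccess.hpp, not the local variable used by acquire() above.
// x86 stores already have release semantics in hardware; the volatile store
// only keeps the compiler from sinking earlier memory accesses below it.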

inline void OrderAccess::fence() {
  if (os::is_MP()) {
#ifdef AMD64
    __asm__ __volatile__ ("mfence":::"memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif // AMD64
  }
}
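
// A uniprocessor needs no fence: a single CPU always observes its own program
// order.  On 32-bit, the locked add of zero to the top of the stack is used
// instead of mfence because it is a full barrier on every IA-32 processor,
// including pre-SSE2 ones that lack the mfence instruction.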

inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }

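// Usage sketch (illustrative, not part of this file): the canonical pairing
// of release_store_ptr() with load_ptr_acquire() to publish an object to
// another thread.  The names Payload, _published, publish() and peek() are
// hypothetical.
//
//   struct Payload { int x; };
//   static Payload* volatile _published = NULL;
//
//   void publish(Payload* p) {
//     p->x = 42;  // initialize before publication
//     OrderAccess::release_store_ptr((volatile void*)&_published, (void*)p);
//   }
//
//   Payload* peek() {
//     // If this acquire load observes p, it also observes p->x == 42.
//     return (Payload*)OrderAccess::load_ptr_acquire((const volatile void*)&_published);
//   }
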
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

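// On x86 an xchg with a memory operand carries an implicit lock prefix, so
// the exchanges above (and the 64-bit ones below) are full two-way fences;
// no separate barrier instruction is required after the store.
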
inline void     OrderAccess::store_fence(jlong*  p, jlong  v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

// AMD64 reuses the bodies of the signed versions; the 32-bit port did the
// same.  As long as the compiler inlines these forwarders, this is simpler.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }

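// jfloat and jdouble take the plain-store-plus-fence() path, presumably
// because an xchg would first require moving the value out of a
// floating-point register.
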
inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}
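
// On 32-bit, pointers are the same width as jint, so forwarding to the jint
// store_fence() above provides the required atomic exchange.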

// Must duplicate definitions instead of calling store_fence because we don't
// want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*  p, jlong  v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }

inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
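
// Usage sketch (illustrative, not part of this file): release_store_fence()
// is for Dekker-style protocols, where a store must be visible to other
// processors before a subsequent load is performed, i.e. where the expensive
// StoreLoad ordering is needed.  The flag names and try_enter() below are
// hypothetical.
//
//   static volatile jint _self_wants_in  = 0;
//   static volatile jint _other_wants_in = 0;
//
//   bool try_enter() {
//     // Publish intent and fence in one instruction; the following load
//     // cannot be satisfied before the store is globally visible.
//     OrderAccess::release_store_fence(&_self_wants_in, 1);
//     return OrderAccess::load_acquire(&_other_wants_in) == 0;
//   }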