Mercurial > hg > truffle
annotate src/cpu/sparc/vm/nativeInst_sparc.cpp @ 18527:b31ae5af9fa3
Merge.
author | Doug Simon <doug.simon@oracle.com> |
---|---|
date | Wed, 26 Nov 2014 12:51:31 +0100 |
parents | cefad50507d8 |
children | b51e29501f30 |
rev | line source |
---|---|
0 | 1 /* |
12316
190899198332
7195622: CheckUnhandledOops has limited usefulness now
hseigel
parents:
10997
diff
changeset
|
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
727
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
727
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
727
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
7204
f0c2369fda5a
8003250: SPARC: move MacroAssembler into separate file
twisti
parents:
6725
diff
changeset
|
26 #include "asm/macroAssembler.hpp" |
9934
0f7ca53be929
CR-806: Changes to build Graal for SPARC
Morris Meyer <morris.meyer@oracle.com>
parents:
7204
diff
changeset
|
27 #include "asm/macroAssembler.inline.hpp" |
0f7ca53be929
CR-806: Changes to build Graal for SPARC
Morris Meyer <morris.meyer@oracle.com>
parents:
7204
diff
changeset
|
28 #include "code/codeCache.hpp" |
1972 | 29 #include "memory/resourceArea.hpp" |
30 #include "nativeInst_sparc.hpp" | |
31 #include "oops/oop.inline.hpp" | |
32 #include "runtime/handles.hpp" | |
33 #include "runtime/sharedRuntime.hpp" | |
34 #include "runtime/stubRoutines.hpp" | |
35 #include "utilities/ostream.hpp" | |
36 #ifdef COMPILER1 | |
37 #include "c1/c1_Runtime1.hpp" | |
38 #endif | |
0 | 39 |
40 | |
116
018d5b58dd4f
6537506: Provide a mechanism for specifying Java-level USDT-like dtrace probes
kamg
parents:
0
diff
changeset
|
41 bool NativeInstruction::is_dtrace_trap() { |
018d5b58dd4f
6537506: Provide a mechanism for specifying Java-level USDT-like dtrace probes
kamg
parents:
0
diff
changeset
|
42 return !is_nop(); |
018d5b58dd4f
6537506: Provide a mechanism for specifying Java-level USDT-like dtrace probes
kamg
parents:
0
diff
changeset
|
43 } |
018d5b58dd4f
6537506: Provide a mechanism for specifying Java-level USDT-like dtrace probes
kamg
parents:
0
diff
changeset
|
44 |
0 | 45 void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) { |
46 ResourceMark rm; | |
47 CodeBuffer buf(instaddr, 10 * BytesPerInstWord ); | |
48 MacroAssembler* _masm = new MacroAssembler(&buf); | |
49 Register destreg; | |
50 | |
51 destreg = inv_rd(*(unsigned int *)instaddr); | |
52 // Generate a the new sequence | |
727 | 53 _masm->patchable_sethi(x, destreg); |
0 | 54 ICache::invalidate_range(instaddr, 7 * BytesPerInstWord); |
55 } | |
56 | |
2375
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
57 void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) { |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
58 ResourceMark rm; |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
59 unsigned char buffer[10 * BytesPerInstWord]; |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
60 CodeBuffer buf(buffer, 10 * BytesPerInstWord); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
61 MacroAssembler masm(&buf); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
62 |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
63 Register destreg = inv_rd(*(unsigned int *)instaddr); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
64 // Generate the proper sequence into a temporary buffer and compare |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
65 // it with the original sequence. |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
66 masm.patchable_sethi(x, destreg); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
67 int len = buffer - masm.pc(); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
68 for (int i = 0; i < len; i++) { |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
69 assert(instaddr[i] == buffer[i], "instructions must match"); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
70 } |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
71 } |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
72 |
0 | 73 void NativeInstruction::verify() { |
74 // make sure code pattern is actually an instruction address | |
75 address addr = addr_at(0); | |
76 if (addr == 0 || ((intptr_t)addr & 3) != 0) { | |
77 fatal("not an instruction address"); | |
78 } | |
79 } | |
80 | |
81 void NativeInstruction::print() { | |
82 tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0)); | |
83 } | |
84 | |
85 void NativeInstruction::set_long_at(int offset, int i) { | |
86 address addr = addr_at(offset); | |
87 *(int*)addr = i; | |
88 ICache::invalidate_word(addr); | |
89 } | |
90 | |
91 void NativeInstruction::set_jlong_at(int offset, jlong i) { | |
92 address addr = addr_at(offset); | |
93 *(jlong*)addr = i; | |
94 // Don't need to invalidate 2 words here, because | |
95 // the flush instruction operates on doublewords. | |
96 ICache::invalidate_word(addr); | |
97 } | |
98 | |
99 void NativeInstruction::set_addr_at(int offset, address x) { | |
100 address addr = addr_at(offset); | |
101 assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment"); | |
102 *(uintptr_t*)addr = (uintptr_t)x; | |
103 // Don't need to invalidate 2 words here in the 64-bit case, | |
104 // because the flush instruction operates on doublewords. | |
105 ICache::invalidate_word(addr); | |
106 // The Intel code has this assertion for NativeCall::set_destination, | |
107 // NativeMovConstReg::set_data, NativeMovRegMem::set_offset, | |
108 // NativeJump::set_jump_destination, and NativePushImm32::set_data | |
109 //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction") | |
110 } | |
111 | |
112 bool NativeInstruction::is_zero_test(Register ®) { | |
113 int x = long_at(0); | |
114 Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3); | |
115 if (is_op3(x, temp, Assembler::arith_op) && | |
116 inv_immed(x) && inv_rd(x) == G0) { | |
117 if (inv_rs1(x) == G0) { | |
118 reg = inv_rs2(x); | |
119 return true; | |
120 } else if (inv_rs2(x) == G0) { | |
121 reg = inv_rs1(x); | |
122 return true; | |
123 } | |
124 } | |
125 return false; | |
126 } | |
127 | |
128 bool NativeInstruction::is_load_store_with_small_offset(Register reg) { | |
129 int x = long_at(0); | |
130 if (is_op(x, Assembler::ldst_op) && | |
131 inv_rs1(x) == reg && inv_immed(x)) { | |
132 return true; | |
133 } | |
134 return false; | |
135 } | |
136 | |
137 void NativeCall::verify() { | |
138 NativeInstruction::verify(); | |
139 // make sure code pattern is actually a call instruction | |
140 if (!is_op(long_at(0), Assembler::call_op)) { | |
141 fatal("not a call"); | |
142 } | |
143 } | |
144 | |
145 void NativeCall::print() { | |
146 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination()); | |
147 } | |
148 | |
149 | |
150 // MT-safe patching of a call instruction (and following word). | |
151 // First patches the second word, and then atomicly replaces | |
152 // the first word with the first new instruction word. | |
153 // Other processors might briefly see the old first word | |
154 // followed by the new second word. This is OK if the old | |
155 // second word is harmless, and the new second word may be | |
156 // harmlessly executed in the delay slot of the call. | |
157 void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) { | |
158 assert(Patching_lock->is_locked() || | |
159 SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); | |
160 assert (instr_addr != NULL, "illegal address for code patching"); | |
161 NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call | |
162 assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8"); | |
163 int i0 = ((int*)code_buffer)[0]; | |
164 int i1 = ((int*)code_buffer)[1]; | |
165 int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord); | |
166 assert(inv_op(*contention_addr) == Assembler::arith_op || | |
10997 | 167 *contention_addr == nop_instruction(), |
0 | 168 "must not interfere with original call"); |
169 // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order | |
170 n_call->set_long_at(1*BytesPerInstWord, i1); | |
171 n_call->set_long_at(0*BytesPerInstWord, i0); | |
172 // NOTE: It is possible that another thread T will execute | |
173 // only the second patched word. | |
174 // In other words, since the original instruction is this | |
175 // call patching_stub; nop (NativeCall) | |
176 // and the new sequence from the buffer is this: | |
177 // sethi %hi(K), %r; add %r, %lo(K), %r (NativeMovConstReg) | |
178 // what T will execute is this: | |
179 // call patching_stub; add %r, %lo(K), %r | |
180 // thereby putting garbage into %r before calling the patching stub. | |
181 // This is OK, because the patching stub ignores the value of %r. | |
182 | |
183 // Make sure the first-patched instruction, which may co-exist | |
184 // briefly with the call, will do something harmless. | |
185 assert(inv_op(*contention_addr) == Assembler::arith_op || | |
10997 | 186 *contention_addr == nop_instruction(), |
0 | 187 "must not interfere with original call"); |
188 } | |
189 | |
190 // Similar to replace_mt_safe, but just changes the destination. The | |
191 // important thing is that free-running threads are able to execute this | |
192 // call instruction at all times. Thus, the displacement field must be | |
193 // instruction-word-aligned. This is always true on SPARC. | |
194 // | |
195 // Used in the runtime linkage of calls; see class CompiledIC. | |
196 void NativeCall::set_destination_mt_safe(address dest) { | |
197 assert(Patching_lock->is_locked() || | |
198 SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); | |
199 // set_destination uses set_long_at which does the ICache::invalidate | |
200 set_destination(dest); | |
201 } | |
202 | |
203 // Code for unit testing implementation of NativeCall class | |
204 void NativeCall::test() { | |
205 #ifdef ASSERT | |
206 ResourceMark rm; | |
207 CodeBuffer cb("test", 100, 100); | |
208 MacroAssembler* a = new MacroAssembler(&cb); | |
209 NativeCall *nc; | |
210 uint idx; | |
211 int offsets[] = { | |
212 0x0, | |
213 0xfffffff0, | |
214 0x7ffffff0, | |
215 0x80000000, | |
216 0x20, | |
217 0x4000, | |
218 }; | |
219 | |
220 VM_Version::allow_all(); | |
221 | |
222 a->call( a->pc(), relocInfo::none ); | |
223 a->delayed()->nop(); | |
1748 | 224 nc = nativeCall_at( cb.insts_begin() ); |
0 | 225 nc->print(); |
226 | |
227 nc = nativeCall_overwriting_at( nc->next_instruction_address() ); | |
228 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) { | |
1748 | 229 nc->set_destination( cb.insts_begin() + offsets[idx] ); |
230 assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test"); | |
0 | 231 nc->print(); |
232 } | |
233 | |
1748 | 234 nc = nativeCall_before( cb.insts_begin() + 8 ); |
0 | 235 nc->print(); |
236 | |
237 VM_Version::revert(); | |
238 #endif | |
239 } | |
240 // End code for unit testing implementation of NativeCall class | |
241 | |
242 //------------------------------------------------------------------- | |
243 | |
244 #ifdef _LP64 | |
245 | |
246 void NativeFarCall::set_destination(address dest) { | |
247 // Address materialized in the instruction stream, so nothing to do. | |
248 return; | |
249 #if 0 // What we'd do if we really did want to change the destination | |
250 if (destination() == dest) { | |
251 return; | |
252 } | |
253 ResourceMark rm; | |
254 CodeBuffer buf(addr_at(0), instruction_size + 1); | |
255 MacroAssembler* _masm = new MacroAssembler(&buf); | |
256 // Generate the new sequence | |
727 | 257 AddressLiteral(dest); |
258 _masm->jumpl_to(dest, O7, O7); | |
0 | 259 ICache::invalidate_range(addr_at(0), instruction_size ); |
260 #endif | |
261 } | |
262 | |
263 void NativeFarCall::verify() { | |
264 // make sure code pattern is actually a jumpl_to instruction | |
265 assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to"); | |
266 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok"); | |
267 nativeJump_at(addr_at(0))->verify(); | |
268 } | |
269 | |
270 bool NativeFarCall::is_call_at(address instr) { | |
271 return nativeInstruction_at(instr)->is_sethi(); | |
272 } | |
273 | |
274 void NativeFarCall::print() { | |
275 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination()); | |
276 } | |
277 | |
278 bool NativeFarCall::destination_is_compiled_verified_entry_point() { | |
279 nmethod* callee = CodeCache::find_nmethod(destination()); | |
280 if (callee == NULL) { | |
281 return false; | |
282 } else { | |
283 return destination() == callee->verified_entry_point(); | |
284 } | |
285 } | |
286 | |
287 // MT-safe patching of a far call. | |
288 void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) { | |
289 Unimplemented(); | |
290 } | |
291 | |
292 // Code for unit testing implementation of NativeFarCall class | |
293 void NativeFarCall::test() { | |
294 Unimplemented(); | |
295 } | |
296 // End code for unit testing implementation of NativeFarCall class | |
297 | |
298 #endif // _LP64 | |
299 | |
300 //------------------------------------------------------------------- | |
301 | |
302 | |
303 void NativeMovConstReg::verify() { | |
304 NativeInstruction::verify(); | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
305 // make sure code pattern is actually a "set_metadata" synthetic instruction |
0 | 306 // see MacroAssembler::set_oop() |
307 int i0 = long_at(sethi_offset); | |
308 int i1 = long_at(add_offset); | |
309 | |
310 // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg" | |
311 Register rd = inv_rd(i0); | |
312 #ifndef _LP64 | |
313 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 && | |
314 is_op3(i1, Assembler::add_op3, Assembler::arith_op) && | |
315 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) && | |
316 rd == inv_rs1(i1) && rd == inv_rd(i1))) { | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
317 fatal("not a set_metadata"); |
0 | 318 } |
319 #else | |
320 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) { | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
321 fatal("not a set_metadata"); |
0 | 322 } |
323 #endif | |
324 } | |
325 | |
326 | |
327 void NativeMovConstReg::print() { | |
328 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data()); | |
329 } | |
330 | |
331 | |
332 #ifdef _LP64 | |
333 intptr_t NativeMovConstReg::data() const { | |
334 return data64(addr_at(sethi_offset), long_at(add_offset)); | |
335 } | |
336 #else | |
337 intptr_t NativeMovConstReg::data() const { | |
338 return data32(long_at(sethi_offset), long_at(add_offset)); | |
339 } | |
340 #endif | |
341 | |
342 | |
343 void NativeMovConstReg::set_data(intptr_t x) { | |
344 #ifdef _LP64 | |
345 set_data64_sethi(addr_at(sethi_offset), x); | |
346 #else | |
347 set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x)); | |
348 #endif | |
349 set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x)); | |
350 | |
351 // also store the value into an oop_Relocation cell, if any | |
1563
1a5913bf5e19
6951083: oops and relocations should part of nmethod not CodeBlob
twisti
parents:
727
diff
changeset
|
352 CodeBlob* cb = CodeCache::find_blob(instruction_address()); |
1a5913bf5e19
6951083: oops and relocations should part of nmethod not CodeBlob
twisti
parents:
727
diff
changeset
|
353 nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL; |
0 | 354 if (nm != NULL) { |
355 RelocIterator iter(nm, instruction_address(), next_instruction_address()); | |
356 oop* oop_addr = NULL; | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
357 Metadata** metadata_addr = NULL; |
0 | 358 while (iter.next()) { |
359 if (iter.type() == relocInfo::oop_type) { | |
360 oop_Relocation *r = iter.oop_reloc(); | |
361 if (oop_addr == NULL) { | |
362 oop_addr = r->oop_addr(); | |
12316
190899198332
7195622: CheckUnhandledOops has limited usefulness now
hseigel
parents:
10997
diff
changeset
|
363 *oop_addr = cast_to_oop(x); |
0 | 364 } else { |
365 assert(oop_addr == r->oop_addr(), "must be only one set-oop here"); | |
366 } | |
367 } | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
368 if (iter.type() == relocInfo::metadata_type) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
369 metadata_Relocation *r = iter.metadata_reloc(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
370 if (metadata_addr == NULL) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
371 metadata_addr = r->metadata_addr(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
372 *metadata_addr = (Metadata*)x; |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
373 } else { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
374 assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here"); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
375 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
376 } |
0 | 377 } |
378 } | |
379 } | |
380 | |
381 | |
382 // Code for unit testing implementation of NativeMovConstReg class | |
383 void NativeMovConstReg::test() { | |
384 #ifdef ASSERT | |
385 ResourceMark rm; | |
386 CodeBuffer cb("test", 100, 100); | |
387 MacroAssembler* a = new MacroAssembler(&cb); | |
388 NativeMovConstReg* nm; | |
389 uint idx; | |
390 int offsets[] = { | |
391 0x0, | |
392 0x7fffffff, | |
393 0x80000000, | |
394 0xffffffff, | |
395 0x20, | |
396 4096, | |
397 4097, | |
398 }; | |
399 | |
400 VM_Version::allow_all(); | |
401 | |
727 | 402 AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type); |
403 a->sethi(al1, I3); | |
404 a->add(I3, al1.low10(), I3); | |
405 AddressLiteral al2(0xccccdddd, relocInfo::external_word_type); | |
406 a->sethi(al2, O2); | |
407 a->add(O2, al2.low10(), O2); | |
0 | 408 |
1748 | 409 nm = nativeMovConstReg_at( cb.insts_begin() ); |
0 | 410 nm->print(); |
411 | |
412 nm = nativeMovConstReg_at( nm->next_instruction_address() ); | |
413 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) { | |
414 nm->set_data( offsets[idx] ); | |
415 assert(nm->data() == offsets[idx], "check unit test"); | |
416 } | |
417 nm->print(); | |
418 | |
419 VM_Version::revert(); | |
420 #endif | |
421 } | |
422 // End code for unit testing implementation of NativeMovConstReg class | |
423 | |
424 //------------------------------------------------------------------- | |
425 | |
426 void NativeMovConstRegPatching::verify() { | |
427 NativeInstruction::verify(); | |
428 // Make sure code pattern is sethi/nop/add. | |
429 int i0 = long_at(sethi_offset); | |
430 int i1 = long_at(nop_offset); | |
431 int i2 = long_at(add_offset); | |
432 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok"); | |
433 | |
434 // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg" | |
435 // The casual reader should note that on Sparc a nop is a special case if sethi | |
436 // in which the destination register is %g0. | |
437 Register rd0 = inv_rd(i0); | |
438 Register rd1 = inv_rd(i1); | |
439 if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 && | |
440 is_op2(i1, Assembler::sethi_op2) && rd1 == G0 && // nop is a special case of sethi | |
441 is_op3(i2, Assembler::add_op3, Assembler::arith_op) && | |
442 inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) && | |
443 rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) { | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
444 fatal("not a set_metadata"); |
0 | 445 } |
446 } | |
447 | |
448 | |
449 void NativeMovConstRegPatching::print() { | |
450 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data()); | |
451 } | |
452 | |
453 | |
454 int NativeMovConstRegPatching::data() const { | |
455 #ifdef _LP64 | |
456 return data64(addr_at(sethi_offset), long_at(add_offset)); | |
457 #else | |
458 return data32(long_at(sethi_offset), long_at(add_offset)); | |
459 #endif | |
460 } | |
461 | |
462 | |
463 void NativeMovConstRegPatching::set_data(int x) { | |
464 #ifdef _LP64 | |
465 set_data64_sethi(addr_at(sethi_offset), x); | |
466 #else | |
467 set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x)); | |
468 #endif | |
469 set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x)); | |
470 | |
471 // also store the value into an oop_Relocation cell, if any | |
1563
1a5913bf5e19
6951083: oops and relocations should part of nmethod not CodeBlob
twisti
parents:
727
diff
changeset
|
472 CodeBlob* cb = CodeCache::find_blob(instruction_address()); |
1a5913bf5e19
6951083: oops and relocations should part of nmethod not CodeBlob
twisti
parents:
727
diff
changeset
|
473 nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL; |
0 | 474 if (nm != NULL) { |
475 RelocIterator iter(nm, instruction_address(), next_instruction_address()); | |
476 oop* oop_addr = NULL; | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
477 Metadata** metadata_addr = NULL; |
0 | 478 while (iter.next()) { |
479 if (iter.type() == relocInfo::oop_type) { | |
480 oop_Relocation *r = iter.oop_reloc(); | |
481 if (oop_addr == NULL) { | |
482 oop_addr = r->oop_addr(); | |
12316
190899198332
7195622: CheckUnhandledOops has limited usefulness now
hseigel
parents:
10997
diff
changeset
|
483 *oop_addr = cast_to_oop(x); |
0 | 484 } else { |
485 assert(oop_addr == r->oop_addr(), "must be only one set-oop here"); | |
486 } | |
487 } | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
488 if (iter.type() == relocInfo::metadata_type) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
489 metadata_Relocation *r = iter.metadata_reloc(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
490 if (metadata_addr == NULL) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
491 metadata_addr = r->metadata_addr(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
492 *metadata_addr = (Metadata*)x; |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
493 } else { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
494 assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here"); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
495 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
496 } |
0 | 497 } |
498 } | |
499 } | |
500 | |
501 | |
502 // Code for unit testing implementation of NativeMovConstRegPatching class | |
503 void NativeMovConstRegPatching::test() { | |
504 #ifdef ASSERT | |
505 ResourceMark rm; | |
506 CodeBuffer cb("test", 100, 100); | |
507 MacroAssembler* a = new MacroAssembler(&cb); | |
508 NativeMovConstRegPatching* nm; | |
509 uint idx; | |
510 int offsets[] = { | |
511 0x0, | |
512 0x7fffffff, | |
513 0x80000000, | |
514 0xffffffff, | |
515 0x20, | |
516 4096, | |
517 4097, | |
518 }; | |
519 | |
520 VM_Version::allow_all(); | |
521 | |
727 | 522 AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type); |
523 a->sethi(al1, I3); | |
0 | 524 a->nop(); |
727 | 525 a->add(I3, al1.low10(), I3); |
526 AddressLiteral al2(0xccccdddd, relocInfo::external_word_type); | |
527 a->sethi(al2, O2); | |
0 | 528 a->nop(); |
727 | 529 a->add(O2, al2.low10(), O2); |
0 | 530 |
1748 | 531 nm = nativeMovConstRegPatching_at( cb.insts_begin() ); |
0 | 532 nm->print(); |
533 | |
534 nm = nativeMovConstRegPatching_at( nm->next_instruction_address() ); | |
535 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) { | |
536 nm->set_data( offsets[idx] ); | |
537 assert(nm->data() == offsets[idx], "check unit test"); | |
538 } | |
539 nm->print(); | |
540 | |
541 VM_Version::revert(); | |
542 #endif // ASSERT | |
543 } | |
544 // End code for unit testing implementation of NativeMovConstRegPatching class | |
545 | |
546 | |
547 //------------------------------------------------------------------- | |
548 | |
549 | |
550 void NativeMovRegMem::copy_instruction_to(address new_instruction_address) { | |
551 Untested("copy_instruction_to"); | |
552 int instruction_size = next_instruction_address() - instruction_address(); | |
553 for (int i = 0; i < instruction_size; i += BytesPerInstWord) { | |
554 *(int*)(new_instruction_address + i) = *(int*)(address(this) + i); | |
555 } | |
556 } | |
557 | |
558 | |
559 void NativeMovRegMem::verify() { | |
560 NativeInstruction::verify(); | |
561 // make sure code pattern is actually a "ld" or "st" of some sort. | |
562 int i0 = long_at(0); | |
563 int op3 = inv_op3(i0); | |
564 | |
565 assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok"); | |
566 | |
567 if (!(is_op(i0, Assembler::ldst_op) && | |
568 inv_immed(i0) && | |
569 0 != (op3 < op3_ldst_int_limit | |
570 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st) | |
571 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) | |
572 { | |
573 int i1 = long_at(ldst_offset); | |
574 Register rd = inv_rd(i0); | |
575 | |
576 op3 = inv_op3(i1); | |
577 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) && | |
578 0 != (op3 < op3_ldst_int_limit | |
579 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st) | |
580 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) { | |
581 fatal("not a ld* or st* op"); | |
582 } | |
583 } | |
584 } | |
585 | |
586 | |
587 void NativeMovRegMem::print() { | |
588 if (is_immediate()) { | |
589 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset()); | |
590 } else { | |
591 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address()); | |
592 } | |
593 } | |
594 | |
595 | |
596 // Code for unit testing implementation of NativeMovRegMem class | |
597 void NativeMovRegMem::test() { | |
598 #ifdef ASSERT | |
599 ResourceMark rm; | |
600 CodeBuffer cb("test", 1000, 1000); | |
601 MacroAssembler* a = new MacroAssembler(&cb); | |
602 NativeMovRegMem* nm; | |
603 uint idx = 0; | |
604 uint idx1; | |
605 int offsets[] = { | |
606 0x0, | |
607 0xffffffff, | |
608 0x7fffffff, | |
609 0x80000000, | |
610 4096, | |
611 4097, | |
612 0x20, | |
613 0x4000, | |
614 }; | |
615 | |
616 VM_Version::allow_all(); | |
617 | |
727 | 618 AddressLiteral al1(0xffffffff, relocInfo::external_word_type); |
619 AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type); | |
620 a->ldsw( G5, al1.low10(), G4 ); idx++; | |
621 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 622 a->ldsw( G5, I3, G4 ); idx++; |
727 | 623 a->ldsb( G5, al1.low10(), G4 ); idx++; |
624 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 625 a->ldsb( G5, I3, G4 ); idx++; |
727 | 626 a->ldsh( G5, al1.low10(), G4 ); idx++; |
627 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 628 a->ldsh( G5, I3, G4 ); idx++; |
727 | 629 a->lduw( G5, al1.low10(), G4 ); idx++; |
630 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 631 a->lduw( G5, I3, G4 ); idx++; |
727 | 632 a->ldub( G5, al1.low10(), G4 ); idx++; |
633 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 634 a->ldub( G5, I3, G4 ); idx++; |
727 | 635 a->lduh( G5, al1.low10(), G4 ); idx++; |
636 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 637 a->lduh( G5, I3, G4 ); idx++; |
727 | 638 a->ldx( G5, al1.low10(), G4 ); idx++; |
639 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 640 a->ldx( G5, I3, G4 ); idx++; |
727 | 641 a->ldd( G5, al1.low10(), G4 ); idx++; |
642 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 643 a->ldd( G5, I3, G4 ); idx++; |
644 a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++; | |
727 | 645 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); |
0 | 646 a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++; |
647 | |
727 | 648 a->stw( G5, G4, al1.low10() ); idx++; |
649 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 650 a->stw( G5, G4, I3 ); idx++; |
727 | 651 a->stb( G5, G4, al1.low10() ); idx++; |
652 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 653 a->stb( G5, G4, I3 ); idx++; |
727 | 654 a->sth( G5, G4, al1.low10() ); idx++; |
655 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 656 a->sth( G5, G4, I3 ); idx++; |
727 | 657 a->stx( G5, G4, al1.low10() ); idx++; |
658 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 659 a->stx( G5, G4, I3 ); idx++; |
727 | 660 a->std( G5, G4, al1.low10() ); idx++; |
661 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 662 a->std( G5, G4, I3 ); idx++; |
663 a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++; | |
727 | 664 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); |
0 | 665 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++; |
666 | |
1748 | 667 nm = nativeMovRegMem_at( cb.insts_begin() ); |
0 | 668 nm->print(); |
669 nm->set_offset( low10(0) ); | |
670 nm->print(); | |
671 nm->add_offset_in_bytes( low10(0xbb) * wordSize ); | |
672 nm->print(); | |
673 | |
674 while (--idx) { | |
675 nm = nativeMovRegMem_at( nm->next_instruction_address() ); | |
676 nm->print(); | |
677 for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) { | |
678 nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] ); | |
679 assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]), | |
680 "check unit test"); | |
681 nm->print(); | |
682 } | |
683 nm->add_offset_in_bytes( low10(0xbb) * wordSize ); | |
684 nm->print(); | |
685 } | |
686 | |
687 VM_Version::revert(); | |
688 #endif // ASSERT | |
689 } | |
690 | |
691 // End code for unit testing implementation of NativeMovRegMem class | |
692 | |
693 //-------------------------------------------------------------------------------- | |
694 | |
695 | |
696 void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) { | |
697 Untested("copy_instruction_to"); | |
698 int instruction_size = next_instruction_address() - instruction_address(); | |
699 for (int i = 0; i < instruction_size; i += wordSize) { | |
700 *(long*)(new_instruction_address + i) = *(long*)(address(this) + i); | |
701 } | |
702 } | |
703 | |
704 | |
705 void NativeMovRegMemPatching::verify() { | |
706 NativeInstruction::verify(); | |
707 // make sure code pattern is actually a "ld" or "st" of some sort. | |
708 int i0 = long_at(0); | |
709 int op3 = inv_op3(i0); | |
710 | |
711 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok"); | |
712 | |
713 if (!(is_op(i0, Assembler::ldst_op) && | |
714 inv_immed(i0) && | |
715 0 != (op3 < op3_ldst_int_limit | |
716 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st) | |
717 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) { | |
718 int i1 = long_at(ldst_offset); | |
719 Register rd = inv_rd(i0); | |
720 | |
721 op3 = inv_op3(i1); | |
722 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) && | |
723 0 != (op3 < op3_ldst_int_limit | |
724 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st) | |
725 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) { | |
726 fatal("not a ld* or st* op"); | |
727 } | |
728 } | |
729 } | |
730 | |
731 | |
732 void NativeMovRegMemPatching::print() { | |
733 if (is_immediate()) { | |
734 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset()); | |
735 } else { | |
736 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address()); | |
737 } | |
738 } | |
739 | |
740 | |
741 // Code for unit testing implementation of NativeMovRegMemPatching class | |
// Debug-only unit test: assembles one instance of every ld*/st* form this
// class can patch (immediate-offset and sethi/nop/add register-offset),
// then walks the emitted code re-patching each one's offset and asserting
// that offset() reads back what set_offset() wrote.
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;     // counts instructions emitted; drives the walk below
  uint idx1;
  // Offsets exercising boundary values (0, all-ones, INT_MAX/MIN patterns,
  // values just around the 13-bit simm boundary at 4096).
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  // For each access width: emit the immediate form, then the sethi-based
  // register form (sethi; nop; add builds the address in I3).
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Same coverage for the store forms.
  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  // Smoke-test the first instruction by hand...
  nm = nativeMovRegMemPatching_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // ...then iterate over the remaining idx-1 instructions, round-tripping
  // every boundary offset through set_offset()/offset().  Immediate forms
  // can only hold a low10 value, hence the is_immediate() distinction.
  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
834 // End code for unit testing implementation of NativeMovRegMemPatching class | |
835 | |
836 | |
837 //-------------------------------------------------------------------------------- | |
838 | |
839 | |
840 void NativeJump::verify() { | |
841 NativeInstruction::verify(); | |
842 int i0 = long_at(sethi_offset); | |
843 int i1 = long_at(jmpl_offset); | |
844 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok"); | |
845 // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg" | |
846 Register rd = inv_rd(i0); | |
847 #ifndef _LP64 | |
848 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 && | |
849 (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) || | |
850 (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) && | |
851 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) && | |
852 rd == inv_rs1(i1))) { | |
853 fatal("not a jump_to instruction"); | |
854 } | |
855 #else | |
856 // In LP64, the jump instruction location varies for non relocatable | |
857 // jumps, for example is could be sethi, xor, jmp instead of the | |
858 // 7 instructions for sethi. So let's check sethi only. | |
859 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) { | |
860 fatal("not a jump_to instruction"); | |
861 } | |
862 #endif | |
863 } | |
864 | |
865 | |
866 void NativeJump::print() { | |
867 tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination()); | |
868 } | |
869 | |
870 | |
871 // Code for unit testing implementation of NativeJump class | |
// Debug-only unit test: assembles two sethi/jmpl jumps, then round-trips a
// set of boundary destinations through set_jump_destination()/
// jump_destination() on the second one.
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  // Destination offsets exercising boundary values (0, all-ones,
  // INT_MAX/MIN patterns, values around the 13-bit simm boundary at 4096).
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  // Two jump_to sequences: one linking through G0 (plain jump), one
  // through L3.  Each delay slot is filled with a nop.
  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  // Re-target the second jump to each offset and check the readback.
  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
913 // End code for unit testing implementation of NativeJump class | |
914 | |
915 | |
// Insert a jump to 'entry' at 'code_pos'.  Not implemented on SPARC;
// callers patch jumps via patch_verified_entry / NativeGeneralJump instead.
void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}
919 | |
920 // MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie) | |
921 // The problem: jump_to <dest> is a 3-word instruction (including its delay slot). | |
922 // Atomic write can be only with 1 word. | |
// Patch the verified entry point of an nmethod so that entering it traps
// instead of running the (now invalid) code.  Only a single word is
// written, so the update is atomic with respect to executing threads.
// NOTE(review): 'entry' and 'dest' are unused here — this implementation
// substitutes a faulting load for a real jump; presumably the resulting
// trap is recognized and redirected by the signal handler (see the "must
// agree" note below) — confirm against the platform signal handler code.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations: You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;   // exactly one instruction is emitted
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  // Flush the patched word from the instruction cache before any thread
  // can execute it.
  ICache::invalidate_range(verified_entry, code_size);
}
941 | |
942 | |
943 void NativeIllegalInstruction::insert(address code_pos) { | |
944 NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos); | |
945 nii->set_long_at(0, illegal_instruction()); | |
946 } | |
947 | |
948 static int illegal_instruction_bits = 0; | |
949 | |
950 int NativeInstruction::illegal_instruction() { | |
951 if (illegal_instruction_bits == 0) { | |
952 ResourceMark rm; | |
953 char buf[40]; | |
954 CodeBuffer cbuf((address)&buf[0], 20); | |
955 MacroAssembler* a = new MacroAssembler(&cbuf); | |
956 address ia = a->pc(); | |
957 a->trap(ST_RESERVED_FOR_USER_0 + 1); | |
958 int bits = *(int*)ia; | |
959 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction"); | |
960 illegal_instruction_bits = bits; | |
961 assert(illegal_instruction_bits != 0, "oops"); | |
962 } | |
963 return illegal_instruction_bits; | |
964 } | |
965 | |
966 static int ic_miss_trap_bits = 0; | |
967 | |
968 bool NativeInstruction::is_ic_miss_trap() { | |
969 if (ic_miss_trap_bits == 0) { | |
970 ResourceMark rm; | |
971 char buf[40]; | |
972 CodeBuffer cbuf((address)&buf[0], 20); | |
973 MacroAssembler* a = new MacroAssembler(&cbuf); | |
974 address ia = a->pc(); | |
975 a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2); | |
976 int bits = *(int*)ia; | |
977 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction"); | |
978 ic_miss_trap_bits = bits; | |
979 assert(ic_miss_trap_bits != 0, "oops"); | |
980 } | |
981 return long_at(0) == ic_miss_trap_bits; | |
982 } | |
983 | |
984 | |
985 bool NativeInstruction::is_illegal() { | |
986 if (illegal_instruction_bits == 0) { | |
987 return false; | |
988 } | |
989 return long_at(0) == illegal_instruction_bits; | |
990 } | |
991 | |
992 | |
993 void NativeGeneralJump::verify() { | |
994 assert(((NativeInstruction *)this)->is_jump() || | |
995 ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction"); | |
996 } | |
997 | |
998 | |
999 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) { | |
1000 Assembler::Condition condition = Assembler::always; | |
1001 int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) | | |
1002 Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22); | |
1003 NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos); | |
1004 ni->set_long_at(0, x); | |
1005 } | |
1006 | |
1007 | |
1008 // MT-safe patching of a jmp instruction (and following word). | |
// First patches the second word, and then atomically replaces
1010 // the first word with the first new instruction word. | |
1011 // Other processors might briefly see the old first word | |
1012 // followed by the new second word. This is OK if the old | |
1013 // second word is harmless, and the new second word may be | |
1014 // harmlessly executed in the delay slot of the call. | |
// Replace the two-word jump at instr_addr with the two instruction words
// staged in code_buffer, in an order that is safe for concurrently
// executing threads (see the block comment above and inline notes below).
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  // Patching is only legal under the Patching_lock or at a safepoint, so
  // two patchers can never race on the same site.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");
  NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
  assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
  // The word after the jump must be harmless (a nop or an arith op) since
  // another thread may execute it together with either old or new first word.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction(),
         "must not interfere with original call");
  // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
  h_jump->set_long_at(1*BytesPerInstWord, i1);
  h_jump->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    jmp patching_stub; nop                    (NativeGeneralJump)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //    jmp patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction(),
         "must not interfere with original call");
}