Mercurial > hg > graal-compiler
annotate src/cpu/sparc/vm/nativeInst_sparc.cpp @ 6812:988bf00cc564
7200261: G1: Liveness counting inconsistencies during marking verification
Summary: The clipping code in the routine that sets the bits for a range of cards, in the liveness accounting verification code was incorrect. It set all the bits in the card bitmap from the given starting index which would lead to spurious marking verification failures.
Reviewed-by: brutisso, jwilhelm, jmasa
author | johnc |
---|---|
date | Thu, 27 Sep 2012 15:44:01 -0700 |
parents | da91efe96a93 |
children | f0c2369fda5a |
rev | line source |
---|---|
0 | 1 /* |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
727
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
727
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
727
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "assembler_sparc.inline.hpp" | |
27 #include "memory/resourceArea.hpp" | |
28 #include "nativeInst_sparc.hpp" | |
29 #include "oops/oop.inline.hpp" | |
30 #include "runtime/handles.hpp" | |
31 #include "runtime/sharedRuntime.hpp" | |
32 #include "runtime/stubRoutines.hpp" | |
33 #include "utilities/ostream.hpp" | |
34 #ifdef COMPILER1 | |
35 #include "c1/c1_Runtime1.hpp" | |
36 #endif | |
0 | 37 |
38 | |
116
018d5b58dd4f
6537506: Provide a mechanism for specifying Java-level USDT-like dtrace probes
kamg
parents:
0
diff
changeset
|
39 bool NativeInstruction::is_dtrace_trap() { |
018d5b58dd4f
6537506: Provide a mechanism for specifying Java-level USDT-like dtrace probes
kamg
parents:
0
diff
changeset
|
40 return !is_nop(); |
018d5b58dd4f
6537506: Provide a mechanism for specifying Java-level USDT-like dtrace probes
kamg
parents:
0
diff
changeset
|
41 } |
018d5b58dd4f
6537506: Provide a mechanism for specifying Java-level USDT-like dtrace probes
kamg
parents:
0
diff
changeset
|
42 |
0 | 43 void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) { |
44 ResourceMark rm; | |
45 CodeBuffer buf(instaddr, 10 * BytesPerInstWord ); | |
46 MacroAssembler* _masm = new MacroAssembler(&buf); | |
47 Register destreg; | |
48 | |
49 destreg = inv_rd(*(unsigned int *)instaddr); | |
50 // Generate the new sequence | |
727 | 51 _masm->patchable_sethi(x, destreg); |
0 | 52 ICache::invalidate_range(instaddr, 7 * BytesPerInstWord); |
53 } | |
54 | |
2375
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
55 void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) { |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
56 ResourceMark rm; |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
57 unsigned char buffer[10 * BytesPerInstWord]; |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
58 CodeBuffer buf(buffer, 10 * BytesPerInstWord); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
59 MacroAssembler masm(&buf); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
60 |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
61 Register destreg = inv_rd(*(unsigned int *)instaddr); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
62 // Generate the proper sequence into a temporary buffer and compare |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
63 // it with the original sequence. |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
64 masm.patchable_sethi(x, destreg); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
65 int len = buffer - masm.pc(); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
66 for (int i = 0; i < len; i++) { |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
67 assert(instaddr[i] == buffer[i], "instructions must match"); |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
68 } |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
69 } |
d673ef06fe96
7028374: race in fix_oop_relocations for scavengeable nmethods
never
parents:
1972
diff
changeset
|
70 |
0 | 71 void NativeInstruction::verify() { |
72 // make sure code pattern is actually an instruction address | |
73 address addr = addr_at(0); | |
74 if (addr == 0 || ((intptr_t)addr & 3) != 0) { | |
75 fatal("not an instruction address"); | |
76 } | |
77 } | |
78 | |
79 void NativeInstruction::print() { | |
80 tty->print_cr(INTPTR_FORMAT ": 0x%x", addr_at(0), long_at(0)); | |
81 } | |
82 | |
83 void NativeInstruction::set_long_at(int offset, int i) { | |
84 address addr = addr_at(offset); | |
85 *(int*)addr = i; | |
86 ICache::invalidate_word(addr); | |
87 } | |
88 | |
89 void NativeInstruction::set_jlong_at(int offset, jlong i) { | |
90 address addr = addr_at(offset); | |
91 *(jlong*)addr = i; | |
92 // Don't need to invalidate 2 words here, because | |
93 // the flush instruction operates on doublewords. | |
94 ICache::invalidate_word(addr); | |
95 } | |
96 | |
97 void NativeInstruction::set_addr_at(int offset, address x) { | |
98 address addr = addr_at(offset); | |
99 assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment"); | |
100 *(uintptr_t*)addr = (uintptr_t)x; | |
101 // Don't need to invalidate 2 words here in the 64-bit case, | |
102 // because the flush instruction operates on doublewords. | |
103 ICache::invalidate_word(addr); | |
104 // The Intel code has this assertion for NativeCall::set_destination, | |
105 // NativeMovConstReg::set_data, NativeMovRegMem::set_offset, | |
106 // NativeJump::set_jump_destination, and NativePushImm32::set_data | |
107 //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction") | |
108 } | |
109 | |
110 bool NativeInstruction::is_zero_test(Register ®) { | |
111 int x = long_at(0); | |
112 Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3); | |
113 if (is_op3(x, temp, Assembler::arith_op) && | |
114 inv_immed(x) && inv_rd(x) == G0) { | |
115 if (inv_rs1(x) == G0) { | |
116 reg = inv_rs2(x); | |
117 return true; | |
118 } else if (inv_rs2(x) == G0) { | |
119 reg = inv_rs1(x); | |
120 return true; | |
121 } | |
122 } | |
123 return false; | |
124 } | |
125 | |
126 bool NativeInstruction::is_load_store_with_small_offset(Register reg) { | |
127 int x = long_at(0); | |
128 if (is_op(x, Assembler::ldst_op) && | |
129 inv_rs1(x) == reg && inv_immed(x)) { | |
130 return true; | |
131 } | |
132 return false; | |
133 } | |
134 | |
135 void NativeCall::verify() { | |
136 NativeInstruction::verify(); | |
137 // make sure code pattern is actually a call instruction | |
138 if (!is_op(long_at(0), Assembler::call_op)) { | |
139 fatal("not a call"); | |
140 } | |
141 } | |
142 | |
143 void NativeCall::print() { | |
144 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination()); | |
145 } | |
146 | |
147 | |
148 // MT-safe patching of a call instruction (and following word). | |
149 // First patches the second word, and then atomically replaces | |
150 // the first word with the first new instruction word. | |
151 // Other processors might briefly see the old first word | |
152 // followed by the new second word. This is OK if the old | |
153 // second word is harmless, and the new second word may be | |
154 // harmlessly executed in the delay slot of the call. | |
155 void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) { | |
156 assert(Patching_lock->is_locked() || | |
157 SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); | |
158 assert (instr_addr != NULL, "illegal address for code patching"); | |
159 NativeCall* n_call = nativeCall_at (instr_addr); // checking that it is a call | |
160 assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8"); | |
161 int i0 = ((int*)code_buffer)[0]; | |
162 int i1 = ((int*)code_buffer)[1]; | |
163 int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord); | |
164 assert(inv_op(*contention_addr) == Assembler::arith_op || | |
165 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(), | |
166 "must not interfere with original call"); | |
167 // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order | |
168 n_call->set_long_at(1*BytesPerInstWord, i1); | |
169 n_call->set_long_at(0*BytesPerInstWord, i0); | |
170 // NOTE: It is possible that another thread T will execute | |
171 // only the second patched word. | |
172 // In other words, since the original instruction is this | |
173 // call patching_stub; nop (NativeCall) | |
174 // and the new sequence from the buffer is this: | |
175 // sethi %hi(K), %r; add %r, %lo(K), %r (NativeMovConstReg) | |
176 // what T will execute is this: | |
177 // call patching_stub; add %r, %lo(K), %r | |
178 // thereby putting garbage into %r before calling the patching stub. | |
179 // This is OK, because the patching stub ignores the value of %r. | |
180 | |
181 // Make sure the first-patched instruction, which may co-exist | |
182 // briefly with the call, will do something harmless. | |
183 assert(inv_op(*contention_addr) == Assembler::arith_op || | |
184 *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(), | |
185 "must not interfere with original call"); | |
186 } | |
187 | |
188 // Similar to replace_mt_safe, but just changes the destination. The | |
189 // important thing is that free-running threads are able to execute this | |
190 // call instruction at all times. Thus, the displacement field must be | |
191 // instruction-word-aligned. This is always true on SPARC. | |
192 // | |
193 // Used in the runtime linkage of calls; see class CompiledIC. | |
194 void NativeCall::set_destination_mt_safe(address dest) { | |
195 assert(Patching_lock->is_locked() || | |
196 SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); | |
197 // set_destination uses set_long_at which does the ICache::invalidate | |
198 set_destination(dest); | |
199 } | |
200 | |
201 // Code for unit testing implementation of NativeCall class | |
202 void NativeCall::test() { | |
203 #ifdef ASSERT | |
204 ResourceMark rm; | |
205 CodeBuffer cb("test", 100, 100); | |
206 MacroAssembler* a = new MacroAssembler(&cb); | |
207 NativeCall *nc; | |
208 uint idx; | |
209 int offsets[] = { | |
210 0x0, | |
211 0xfffffff0, | |
212 0x7ffffff0, | |
213 0x80000000, | |
214 0x20, | |
215 0x4000, | |
216 }; | |
217 | |
218 VM_Version::allow_all(); | |
219 | |
220 a->call( a->pc(), relocInfo::none ); | |
221 a->delayed()->nop(); | |
1748 | 222 nc = nativeCall_at( cb.insts_begin() ); |
0 | 223 nc->print(); |
224 | |
225 nc = nativeCall_overwriting_at( nc->next_instruction_address() ); | |
226 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) { | |
1748 | 227 nc->set_destination( cb.insts_begin() + offsets[idx] ); |
228 assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test"); | |
0 | 229 nc->print(); |
230 } | |
231 | |
1748 | 232 nc = nativeCall_before( cb.insts_begin() + 8 ); |
0 | 233 nc->print(); |
234 | |
235 VM_Version::revert(); | |
236 #endif | |
237 } | |
238 // End code for unit testing implementation of NativeCall class | |
239 | |
240 //------------------------------------------------------------------- | |
241 | |
242 #ifdef _LP64 | |
243 | |
244 void NativeFarCall::set_destination(address dest) { | |
245 // Address materialized in the instruction stream, so nothing to do. | |
246 return; | |
247 #if 0 // What we'd do if we really did want to change the destination | |
248 if (destination() == dest) { | |
249 return; | |
250 } | |
251 ResourceMark rm; | |
252 CodeBuffer buf(addr_at(0), instruction_size + 1); | |
253 MacroAssembler* _masm = new MacroAssembler(&buf); | |
254 // Generate the new sequence | |
727 | 255 AddressLiteral(dest); |
256 _masm->jumpl_to(dest, O7, O7); | |
0 | 257 ICache::invalidate_range(addr_at(0), instruction_size ); |
258 #endif | |
259 } | |
260 | |
261 void NativeFarCall::verify() { | |
262 // make sure code pattern is actually a jumpl_to instruction | |
263 assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to"); | |
264 assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok"); | |
265 nativeJump_at(addr_at(0))->verify(); | |
266 } | |
267 | |
268 bool NativeFarCall::is_call_at(address instr) { | |
269 return nativeInstruction_at(instr)->is_sethi(); | |
270 } | |
271 | |
272 void NativeFarCall::print() { | |
273 tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, instruction_address(), destination()); | |
274 } | |
275 | |
276 bool NativeFarCall::destination_is_compiled_verified_entry_point() { | |
277 nmethod* callee = CodeCache::find_nmethod(destination()); | |
278 if (callee == NULL) { | |
279 return false; | |
280 } else { | |
281 return destination() == callee->verified_entry_point(); | |
282 } | |
283 } | |
284 | |
285 // MT-safe patching of a far call. | |
286 void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) { | |
287 Unimplemented(); | |
288 } | |
289 | |
290 // Code for unit testing implementation of NativeFarCall class | |
291 void NativeFarCall::test() { | |
292 Unimplemented(); | |
293 } | |
294 // End code for unit testing implementation of NativeFarCall class | |
295 | |
296 #endif // _LP64 | |
297 | |
298 //------------------------------------------------------------------- | |
299 | |
300 | |
301 void NativeMovConstReg::verify() { | |
302 NativeInstruction::verify(); | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
303 // make sure code pattern is actually a "set_metadata" synthetic instruction |
0 | 304 // see MacroAssembler::set_oop() |
305 int i0 = long_at(sethi_offset); | |
306 int i1 = long_at(add_offset); | |
307 | |
308 // verify the pattern "sethi %hi22(imm), reg ; add reg, %lo10(imm), reg" | |
309 Register rd = inv_rd(i0); | |
310 #ifndef _LP64 | |
311 if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 && | |
312 is_op3(i1, Assembler::add_op3, Assembler::arith_op) && | |
313 inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) && | |
314 rd == inv_rs1(i1) && rd == inv_rd(i1))) { | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
315 fatal("not a set_metadata"); |
0 | 316 } |
317 #else | |
318 if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) { | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
319 fatal("not a set_metadata"); |
0 | 320 } |
321 #endif | |
322 } | |
323 | |
324 | |
325 void NativeMovConstReg::print() { | |
326 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data()); | |
327 } | |
328 | |
329 | |
330 #ifdef _LP64 | |
331 intptr_t NativeMovConstReg::data() const { | |
332 return data64(addr_at(sethi_offset), long_at(add_offset)); | |
333 } | |
334 #else | |
335 intptr_t NativeMovConstReg::data() const { | |
336 return data32(long_at(sethi_offset), long_at(add_offset)); | |
337 } | |
338 #endif | |
339 | |
340 | |
341 void NativeMovConstReg::set_data(intptr_t x) { | |
342 #ifdef _LP64 | |
343 set_data64_sethi(addr_at(sethi_offset), x); | |
344 #else | |
345 set_long_at(sethi_offset, set_data32_sethi( long_at(sethi_offset), x)); | |
346 #endif | |
347 set_long_at(add_offset, set_data32_simm13( long_at(add_offset), x)); | |
348 | |
349 // also store the value into an oop_Relocation cell, if any | |
1563
1a5913bf5e19
6951083: oops and relocations should part of nmethod not CodeBlob
twisti
parents:
727
diff
changeset
|
350 CodeBlob* cb = CodeCache::find_blob(instruction_address()); |
1a5913bf5e19
6951083: oops and relocations should part of nmethod not CodeBlob
twisti
parents:
727
diff
changeset
|
351 nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL; |
0 | 352 if (nm != NULL) { |
353 RelocIterator iter(nm, instruction_address(), next_instruction_address()); | |
354 oop* oop_addr = NULL; | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
355 Metadata** metadata_addr = NULL; |
0 | 356 while (iter.next()) { |
357 if (iter.type() == relocInfo::oop_type) { | |
358 oop_Relocation *r = iter.oop_reloc(); | |
359 if (oop_addr == NULL) { | |
360 oop_addr = r->oop_addr(); | |
361 *oop_addr = (oop)x; | |
362 } else { | |
363 assert(oop_addr == r->oop_addr(), "must be only one set-oop here"); | |
364 } | |
365 } | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
366 if (iter.type() == relocInfo::metadata_type) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
367 metadata_Relocation *r = iter.metadata_reloc(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
368 if (metadata_addr == NULL) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
369 metadata_addr = r->metadata_addr(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
370 *metadata_addr = (Metadata*)x; |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
371 } else { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
372 assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here"); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
373 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
374 } |
0 | 375 } |
376 } | |
377 } | |
378 | |
379 | |
380 // Code for unit testing implementation of NativeMovConstReg class | |
381 void NativeMovConstReg::test() { | |
382 #ifdef ASSERT | |
383 ResourceMark rm; | |
384 CodeBuffer cb("test", 100, 100); | |
385 MacroAssembler* a = new MacroAssembler(&cb); | |
386 NativeMovConstReg* nm; | |
387 uint idx; | |
388 int offsets[] = { | |
389 0x0, | |
390 0x7fffffff, | |
391 0x80000000, | |
392 0xffffffff, | |
393 0x20, | |
394 4096, | |
395 4097, | |
396 }; | |
397 | |
398 VM_Version::allow_all(); | |
399 | |
727 | 400 AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type); |
401 a->sethi(al1, I3); | |
402 a->add(I3, al1.low10(), I3); | |
403 AddressLiteral al2(0xccccdddd, relocInfo::external_word_type); | |
404 a->sethi(al2, O2); | |
405 a->add(O2, al2.low10(), O2); | |
0 | 406 |
1748 | 407 nm = nativeMovConstReg_at( cb.insts_begin() ); |
0 | 408 nm->print(); |
409 | |
410 nm = nativeMovConstReg_at( nm->next_instruction_address() ); | |
411 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) { | |
412 nm->set_data( offsets[idx] ); | |
413 assert(nm->data() == offsets[idx], "check unit test"); | |
414 } | |
415 nm->print(); | |
416 | |
417 VM_Version::revert(); | |
418 #endif | |
419 } | |
420 // End code for unit testing implementation of NativeMovConstReg class | |
421 | |
422 //------------------------------------------------------------------- | |
423 | |
424 void NativeMovConstRegPatching::verify() { | |
425 NativeInstruction::verify(); | |
426 // Make sure code pattern is sethi/nop/add. | |
427 int i0 = long_at(sethi_offset); | |
428 int i1 = long_at(nop_offset); | |
429 int i2 = long_at(add_offset); | |
430 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok"); | |
431 | |
432 // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg" | |
433 // The casual reader should note that on Sparc a nop is a special case of sethi | |
434 // in which the destination register is %g0. | |
435 Register rd0 = inv_rd(i0); | |
436 Register rd1 = inv_rd(i1); | |
437 if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 && | |
438 is_op2(i1, Assembler::sethi_op2) && rd1 == G0 && // nop is a special case of sethi | |
439 is_op3(i2, Assembler::add_op3, Assembler::arith_op) && | |
440 inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) && | |
441 rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) { | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
442 fatal("not a set_metadata"); |
0 | 443 } |
444 } | |
445 | |
446 | |
447 void NativeMovConstRegPatching::print() { | |
448 tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, instruction_address(), data()); | |
449 } | |
450 | |
451 | |
452 int NativeMovConstRegPatching::data() const { | |
453 #ifdef _LP64 | |
454 return data64(addr_at(sethi_offset), long_at(add_offset)); | |
455 #else | |
456 return data32(long_at(sethi_offset), long_at(add_offset)); | |
457 #endif | |
458 } | |
459 | |
460 | |
461 void NativeMovConstRegPatching::set_data(int x) { | |
462 #ifdef _LP64 | |
463 set_data64_sethi(addr_at(sethi_offset), x); | |
464 #else | |
465 set_long_at(sethi_offset, set_data32_sethi(long_at(sethi_offset), x)); | |
466 #endif | |
467 set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x)); | |
468 | |
469 // also store the value into an oop_Relocation cell, if any | |
1563
1a5913bf5e19
6951083: oops and relocations should part of nmethod not CodeBlob
twisti
parents:
727
diff
changeset
|
470 CodeBlob* cb = CodeCache::find_blob(instruction_address()); |
1a5913bf5e19
6951083: oops and relocations should part of nmethod not CodeBlob
twisti
parents:
727
diff
changeset
|
471 nmethod* nm = cb ? cb->as_nmethod_or_null() : NULL; |
0 | 472 if (nm != NULL) { |
473 RelocIterator iter(nm, instruction_address(), next_instruction_address()); | |
474 oop* oop_addr = NULL; | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
475 Metadata** metadata_addr = NULL; |
0 | 476 while (iter.next()) { |
477 if (iter.type() == relocInfo::oop_type) { | |
478 oop_Relocation *r = iter.oop_reloc(); | |
479 if (oop_addr == NULL) { | |
480 oop_addr = r->oop_addr(); | |
481 *oop_addr = (oop)x; | |
482 } else { | |
483 assert(oop_addr == r->oop_addr(), "must be only one set-oop here"); | |
484 } | |
485 } | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
486 if (iter.type() == relocInfo::metadata_type) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
487 metadata_Relocation *r = iter.metadata_reloc(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
488 if (metadata_addr == NULL) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
489 metadata_addr = r->metadata_addr(); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
490 *metadata_addr = (Metadata*)x; |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
491 } else { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
492 assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here"); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
493 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
2426
diff
changeset
|
494 } |
0 | 495 } |
496 } | |
497 } | |
498 | |
499 | |
500 // Code for unit testing implementation of NativeMovConstRegPatching class | |
501 void NativeMovConstRegPatching::test() { | |
502 #ifdef ASSERT | |
503 ResourceMark rm; | |
504 CodeBuffer cb("test", 100, 100); | |
505 MacroAssembler* a = new MacroAssembler(&cb); | |
506 NativeMovConstRegPatching* nm; | |
507 uint idx; | |
508 int offsets[] = { | |
509 0x0, | |
510 0x7fffffff, | |
511 0x80000000, | |
512 0xffffffff, | |
513 0x20, | |
514 4096, | |
515 4097, | |
516 }; | |
517 | |
518 VM_Version::allow_all(); | |
519 | |
727 | 520 AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type); |
521 a->sethi(al1, I3); | |
0 | 522 a->nop(); |
727 | 523 a->add(I3, al1.low10(), I3); |
524 AddressLiteral al2(0xccccdddd, relocInfo::external_word_type); | |
525 a->sethi(al2, O2); | |
0 | 526 a->nop(); |
727 | 527 a->add(O2, al2.low10(), O2); |
0 | 528 |
1748 | 529 nm = nativeMovConstRegPatching_at( cb.insts_begin() ); |
0 | 530 nm->print(); |
531 | |
532 nm = nativeMovConstRegPatching_at( nm->next_instruction_address() ); | |
533 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) { | |
534 nm->set_data( offsets[idx] ); | |
535 assert(nm->data() == offsets[idx], "check unit test"); | |
536 } | |
537 nm->print(); | |
538 | |
539 VM_Version::revert(); | |
540 #endif // ASSERT | |
541 } | |
542 // End code for unit testing implementation of NativeMovConstRegPatching class | |
543 | |
544 | |
545 //------------------------------------------------------------------- | |
546 | |
547 | |
548 void NativeMovRegMem::copy_instruction_to(address new_instruction_address) { | |
549 Untested("copy_instruction_to"); | |
550 int instruction_size = next_instruction_address() - instruction_address(); | |
551 for (int i = 0; i < instruction_size; i += BytesPerInstWord) { | |
552 *(int*)(new_instruction_address + i) = *(int*)(address(this) + i); | |
553 } | |
554 } | |
555 | |
556 | |
557 void NativeMovRegMem::verify() { | |
558 NativeInstruction::verify(); | |
559 // make sure code pattern is actually a "ld" or "st" of some sort. | |
560 int i0 = long_at(0); | |
561 int op3 = inv_op3(i0); | |
562 | |
563 assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok"); | |
564 | |
565 if (!(is_op(i0, Assembler::ldst_op) && | |
566 inv_immed(i0) && | |
567 0 != (op3 < op3_ldst_int_limit | |
568 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st) | |
569 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) | |
570 { | |
571 int i1 = long_at(ldst_offset); | |
572 Register rd = inv_rd(i0); | |
573 | |
574 op3 = inv_op3(i1); | |
575 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) && | |
576 0 != (op3 < op3_ldst_int_limit | |
577 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st) | |
578 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) { | |
579 fatal("not a ld* or st* op"); | |
580 } | |
581 } | |
582 } | |
583 | |
584 | |
585 void NativeMovRegMem::print() { | |
586 if (is_immediate()) { | |
587 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset()); | |
588 } else { | |
589 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address()); | |
590 } | |
591 } | |
592 | |
593 | |
594 // Code for unit testing implementation of NativeMovRegMem class | |
595 void NativeMovRegMem::test() { | |
596 #ifdef ASSERT | |
597 ResourceMark rm; | |
598 CodeBuffer cb("test", 1000, 1000); | |
599 MacroAssembler* a = new MacroAssembler(&cb); | |
600 NativeMovRegMem* nm; | |
601 uint idx = 0; | |
602 uint idx1; | |
603 int offsets[] = { | |
604 0x0, | |
605 0xffffffff, | |
606 0x7fffffff, | |
607 0x80000000, | |
608 4096, | |
609 4097, | |
610 0x20, | |
611 0x4000, | |
612 }; | |
613 | |
614 VM_Version::allow_all(); | |
615 | |
727 | 616 AddressLiteral al1(0xffffffff, relocInfo::external_word_type); |
617 AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type); | |
618 a->ldsw( G5, al1.low10(), G4 ); idx++; | |
619 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 620 a->ldsw( G5, I3, G4 ); idx++; |
727 | 621 a->ldsb( G5, al1.low10(), G4 ); idx++; |
622 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 623 a->ldsb( G5, I3, G4 ); idx++; |
727 | 624 a->ldsh( G5, al1.low10(), G4 ); idx++; |
625 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 626 a->ldsh( G5, I3, G4 ); idx++; |
727 | 627 a->lduw( G5, al1.low10(), G4 ); idx++; |
628 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 629 a->lduw( G5, I3, G4 ); idx++; |
727 | 630 a->ldub( G5, al1.low10(), G4 ); idx++; |
631 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 632 a->ldub( G5, I3, G4 ); idx++; |
727 | 633 a->lduh( G5, al1.low10(), G4 ); idx++; |
634 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 635 a->lduh( G5, I3, G4 ); idx++; |
727 | 636 a->ldx( G5, al1.low10(), G4 ); idx++; |
637 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 638 a->ldx( G5, I3, G4 ); idx++; |
727 | 639 a->ldd( G5, al1.low10(), G4 ); idx++; |
640 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 641 a->ldd( G5, I3, G4 ); idx++; |
642 a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++; | |
727 | 643 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); |
0 | 644 a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++; |
645 | |
727 | 646 a->stw( G5, G4, al1.low10() ); idx++; |
647 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 648 a->stw( G5, G4, I3 ); idx++; |
727 | 649 a->stb( G5, G4, al1.low10() ); idx++; |
650 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 651 a->stb( G5, G4, I3 ); idx++; |
727 | 652 a->sth( G5, G4, al1.low10() ); idx++; |
653 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 654 a->sth( G5, G4, I3 ); idx++; |
727 | 655 a->stx( G5, G4, al1.low10() ); idx++; |
656 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 657 a->stx( G5, G4, I3 ); idx++; |
727 | 658 a->std( G5, G4, al1.low10() ); idx++; |
659 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); | |
0 | 660 a->std( G5, G4, I3 ); idx++; |
661 a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++; | |
727 | 662 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); |
0 | 663 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++; |
664 | |
1748 | 665 nm = nativeMovRegMem_at( cb.insts_begin() ); |
0 | 666 nm->print(); |
667 nm->set_offset( low10(0) ); | |
668 nm->print(); | |
669 nm->add_offset_in_bytes( low10(0xbb) * wordSize ); | |
670 nm->print(); | |
671 | |
672 while (--idx) { | |
673 nm = nativeMovRegMem_at( nm->next_instruction_address() ); | |
674 nm->print(); | |
675 for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) { | |
676 nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] ); | |
677 assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]), | |
678 "check unit test"); | |
679 nm->print(); | |
680 } | |
681 nm->add_offset_in_bytes( low10(0xbb) * wordSize ); | |
682 nm->print(); | |
683 } | |
684 | |
685 VM_Version::revert(); | |
686 #endif // ASSERT | |
687 } | |
688 | |
689 // End code for unit testing implementation of NativeMovRegMem class | |
690 | |
691 //-------------------------------------------------------------------------------- | |
692 | |
693 | |
694 void NativeMovRegMemPatching::copy_instruction_to(address new_instruction_address) { | |
695 Untested("copy_instruction_to"); | |
696 int instruction_size = next_instruction_address() - instruction_address(); | |
697 for (int i = 0; i < instruction_size; i += wordSize) { | |
698 *(long*)(new_instruction_address + i) = *(long*)(address(this) + i); | |
699 } | |
700 } | |
701 | |
702 | |
703 void NativeMovRegMemPatching::verify() { | |
704 NativeInstruction::verify(); | |
705 // make sure code pattern is actually a "ld" or "st" of some sort. | |
706 int i0 = long_at(0); | |
707 int op3 = inv_op3(i0); | |
708 | |
709 assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok"); | |
710 | |
711 if (!(is_op(i0, Assembler::ldst_op) && | |
712 inv_immed(i0) && | |
713 0 != (op3 < op3_ldst_int_limit | |
714 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st) | |
715 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf)))) { | |
716 int i1 = long_at(ldst_offset); | |
717 Register rd = inv_rd(i0); | |
718 | |
719 op3 = inv_op3(i1); | |
720 if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) && | |
721 0 != (op3 < op3_ldst_int_limit | |
722 ? (1 << op3 ) & (op3_mask_ld | op3_mask_st) | |
723 : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) { | |
724 fatal("not a ld* or st* op"); | |
725 } | |
726 } | |
727 } | |
728 | |
729 | |
730 void NativeMovRegMemPatching::print() { | |
731 if (is_immediate()) { | |
732 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %x]", instruction_address(), offset()); | |
733 } else { | |
734 tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", instruction_address()); | |
735 } | |
736 } | |
737 | |
738 | |
739 // Code for unit testing implementation of NativeMovRegMemPatching class | |
// Debug-only self test: emits every ld*/st* flavor in both the
// immediate-offset form and the register form (sethi; nop; add; ld/st),
// then walks the emitted instructions with nativeMovRegMemPatching_at,
// round-tripping set_offset()/offset() against a table of edge-case values.
// Compiled away entirely outside ASSERT builds.
void NativeMovRegMemPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMemPatching* nm;
  uint idx = 0;   // counts emitted ld/st "units" so the loop below visits each one
  uint idx1;
  // Offset values chosen to probe sign, magnitude, and simm13 boundaries.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0xffffffff, relocInfo::external_word_type);
  // Each pair below emits: (1) the immediate form, (2) the register form
  // where sethi/nop/add materialize the offset into I3 first.
  a->ldsw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al.low10(), G4); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Same coverage for the store flavors.
  a->stw( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al.low10()); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al, I3); a->nop(); a->add(I3, al.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  // Smoke-test the first unit (verify() runs inside the _at accessor).
  nm = nativeMovRegMemPatching_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Walk the remaining idx-1 units, round-tripping every table offset.
  // Immediate forms can only hold the low 10 bits, hence the low10() clamp.
  while (--idx) {
    nm = nativeMovRegMemPatching_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
832 // End code for unit testing implementation of NativeMovRegMemPatching class | |
833 | |
834 | |
835 //-------------------------------------------------------------------------------- | |
836 | |
837 | |
// Verify that this address holds a NativeJump:
//   sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg
// Calls fatal() when the pattern does not match.
void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ; jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
#ifndef _LP64
  // 32-bit: require the full two-instruction pattern; with TraceJumps the
  // jmpl may be replaced by an add (see the is_op3 alternative below).
  if (!(is_op2(i0, Assembler::sethi_op2) && rd != G0 &&
        (is_op3(i1, Assembler::jmpl_op3, Assembler::arith_op) ||
        (TraceJumps && is_op3(i1, Assembler::add_op3, Assembler::arith_op))) &&
        inv_immed(i1) && (unsigned)get_simm13(i1) < (1 << 10) &&
        rd == inv_rs1(i1))) {
    fatal("not a jump_to instruction");
  }
#else
  // In LP64, the jump instruction location varies for non relocatable
  // jumps, for example is could be sethi, xor, jmp instead of the
  // 7 instructions for sethi.  So let's check sethi only.
  // NOTE(review): as written this only fatals when the first word is neither
  // a sethi NOR targets G0; the intent ("check sethi only") suggests it
  // should be !(is_op2(...) && rd != G0) -- TODO confirm before changing.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a jump_to instruction");
  }
#endif
}
862 | |
863 | |
864 void NativeJump::print() { | |
865 tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, instruction_address(), jump_destination()); | |
866 } | |
867 | |
868 | |
869 // Code for unit testing implementation of NativeJump class | |
// Debug-only self test: emits two sethi/jmpl/nop jump sequences (one
// discarding the return address into G0, one saving it into L3), then
// round-trips set_jump_destination()/jump_destination() over a table of
// edge-case displacements.  Compiled away outside ASSERT builds.
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  // Displacements chosen to probe sign, magnitude, and boundary values.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  // Jump sequence #1: link register G0 (plain jump, return address dropped).
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  // Jump sequence #2: link register L3 (return address kept).
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  // Smoke-test the first jump (verify() runs inside nativeJump_at).
  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  // Round-trip every table destination through the second jump.
  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
911 // End code for unit testing implementation of NativeJump class | |
912 | |
913 | |
// Inserting a jump at an arbitrary code position is not supported on SPARC;
// patching is done via NativeJump::patch_verified_entry instead.
void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}
917 | |
918 // MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie) | |
919 // The problem: jump_to <dest> is a 3-word instruction (including its delay slot). | |
920 // Atomic write can be only with 1 word. | |
// MT-safe invalidation of an nmethod's verified entry point (see the
// jump-sequence discussion in the comment block above this function).
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid: atomically overwrite the single
  // first word of the verified entry with a load from address 0 (G0-relative),
  // which faults; the matching signal-handler code recognizes this exact
  // instruction (hence the "must agree" notes below).  NOTE(review): 'entry'
  // and 'dest' are unused here -- presumably the handler routes the thread
  // appropriately; confirm against the signal-handler implementation.
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  if (VM_Version::v9_instructions_work()) {
    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  } else {
    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
  }
  // Flush the instruction cache so other processors see the patched word.
  ICache::invalidate_range(verified_entry, code_size);
}
943 | |
944 | |
945 void NativeIllegalInstruction::insert(address code_pos) { | |
946 NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos); | |
947 nii->set_long_at(0, illegal_instruction()); | |
948 } | |
949 | |
// Cached encoding of the trap used as the "illegal" instruction;
// 0 until first computed by NativeInstruction::illegal_instruction().
static int illegal_instruction_bits = 0;
951 | |
952 int NativeInstruction::illegal_instruction() { | |
953 if (illegal_instruction_bits == 0) { | |
954 ResourceMark rm; | |
955 char buf[40]; | |
956 CodeBuffer cbuf((address)&buf[0], 20); | |
957 MacroAssembler* a = new MacroAssembler(&cbuf); | |
958 address ia = a->pc(); | |
959 a->trap(ST_RESERVED_FOR_USER_0 + 1); | |
960 int bits = *(int*)ia; | |
961 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction"); | |
962 illegal_instruction_bits = bits; | |
963 assert(illegal_instruction_bits != 0, "oops"); | |
964 } | |
965 return illegal_instruction_bits; | |
966 } | |
967 | |
// Cached encoding of the inline-cache-miss trap instruction;
// 0 until first computed by NativeInstruction::is_ic_miss_trap().
static int ic_miss_trap_bits = 0;
969 | |
970 bool NativeInstruction::is_ic_miss_trap() { | |
971 if (ic_miss_trap_bits == 0) { | |
972 ResourceMark rm; | |
973 char buf[40]; | |
974 CodeBuffer cbuf((address)&buf[0], 20); | |
975 MacroAssembler* a = new MacroAssembler(&cbuf); | |
976 address ia = a->pc(); | |
977 a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2); | |
978 int bits = *(int*)ia; | |
979 assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction"); | |
980 ic_miss_trap_bits = bits; | |
981 assert(ic_miss_trap_bits != 0, "oops"); | |
982 } | |
983 return long_at(0) == ic_miss_trap_bits; | |
984 } | |
985 | |
986 | |
987 bool NativeInstruction::is_illegal() { | |
988 if (illegal_instruction_bits == 0) { | |
989 return false; | |
990 } | |
991 return long_at(0) == illegal_instruction_bits; | |
992 } | |
993 | |
994 | |
995 void NativeGeneralJump::verify() { | |
996 assert(((NativeInstruction *)this)->is_jump() || | |
997 ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction"); | |
998 } | |
999 | |
1000 | |
1001 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) { | |
1002 Assembler::Condition condition = Assembler::always; | |
1003 int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) | | |
1004 Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22); | |
1005 NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos); | |
1006 ni->set_long_at(0, x); | |
1007 } | |
1008 | |
1009 | |
1010 // MT-safe patching of a jmp instruction (and following word). | |
1011 // First patches the second word, and then atomicly replaces | |
1012 // the first word with the first new instruction word. | |
1013 // Other processors might briefly see the old first word | |
1014 // followed by the new second word. This is OK if the old | |
1015 // second word is harmless, and the new second word may be | |
1016 // harmlessly executed in the delay slot of the call. | |
// MT-safe replacement of a two-word jump with the two-word sequence in
// code_buffer: patch the second word first, then atomically the first
// (see the ordering rationale in the comment block above this function).
// Caller must hold the Patching_lock or be at a safepoint.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert (instr_addr != NULL, "illegal address for code patching");
  NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
  assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
  // The replacement sequence: first and second instruction words.
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  // The word that other threads may execute mid-patch (the delay slot).
  int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
  // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
  h_jump->set_long_at(1*BytesPerInstWord, i1);
  h_jump->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    jmp patching_stub; nop                    (NativeGeneralJump)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //    jmp patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
         "must not interfere with original call");
}