annotate src/cpu/x86/vm/c1_CodeStubs_x86.cpp @ 3101:6ccb95c97e6d
IdealGraphVisualizer: Work around a problem with JSplitPane and the NetBeans editor: setDividerLocation() has no effect while the split pane has not yet been laid out and painted. JSplitPane then initially uses a tiny width for the left editor component, which causes the editor to calculate invalid offsets and constantly throw exceptions, particularly on mouse events. Therefore, defer adding the two components and setting the divider's location.
author    Peter Hofer <peter.hofer@jku.at>
date      Thu, 30 Jun 2011 12:17:27 +0200
parents   d86923d96dca
children  5d046bf49ce7
/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

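  // This slow path is entered when the fast-path cvttss2si/cvttsd2si produced
  // the "integer indefinite" value 0x80000000, i.e. the input was NaN or out
  // of int range. Compare the input against zero to tell the cases apart.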
  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
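    // x87 fallback: unreachable on LP64 (see the next line), where
    // float/double values are always kept in XMM registers.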
    LP64_ONLY(ShouldNotReachHere());
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_method->as_register(), 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // pass the array index on the stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
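  // The _nofpu stub variants skip saving and restoring FPU state, which is
  // safe when the compiled method contains no FPU code.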
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by the fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on intel and we need to
  // make sure that we don't see a piece of the instruction. It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
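  // E.g. with a 5-byte jump and wordSize == 8, the site is aligned to an
  // 8-byte boundary; a 5-7 byte instruction starting on such a boundary can
  // never straddle a (64-byte) cache line.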
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
    address start = __ pc();
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // begin_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ load_heap_oop_not_null(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
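  // Layout of the 5-byte record emitted below (one a_byte() call each):
  //   0xB8                             movl opcode, never executed
  //   0                                unused immediate byte
  //   being_initialized_entry_offset   distance from the record's end back to
  //                                    the being_initialized entry
  //   bytes_to_skip                    distance from end_of_patch to the
  //                                    record's end (checked by the assert below)
  //   _bytes_to_copy                   number of instruction bytes to patch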
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

  __ a_byte(0xB8);
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id: target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:   target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(RuntimeAddress(SharedRuntime::deopt_blob()->unpack_with_reexecution()));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the methodOop and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

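  // java_calling_convention(..., true /*outgoing*/) assigns each of the five
  // arguments either a register or an outgoing stack slot (addressed relative
  // to rsp in the loop below), as a Java call to arraycopy would expect.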
  // push parameters
  // (src, src_pos, dest, destPos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

/////////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {

  // At this point we know that marking is in progress

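  // SATB pre-barrier slow path: reload the value about to be overwritten and,
  // if it is non-null, hand it to the runtime so concurrent marking still
  // visits the old object.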
  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);

  __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ jmp(_continuation);
}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(addr()->as_pointer_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ jmp(_continuation);
}

#endif // SERIALGC
/////////////////////////////////////////////////////////////////////////////

#undef __