comparison: src/cpu/x86/vm/c1_CodeStubs_x86.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load

author:   duke
date:     Sat, 01 Dec 2007 00:00:00 +0000
parents:  (none)
children: dc7f315e41f7 37f87013dfd8
comparing: -1:000000000000 -> 0:a61af66fc99e

/*
 * Copyright 1999-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_CodeStubs_x86.cpp.incl"


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

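// ConversionStub is the slow path for Bytecodes::_f2i and _d2i. The fast-path
// conversion instructions (cvttss2si/cvttsd2si, or the x87 fist family) leave
// the "integer indefinite" value 0x80000000 in the result register when the
// input is NaN or does not fit in an int; this stub fixes the result up to
// match the Java semantics (NaN -> 0, too large -> maxInt; too small already
// yields minInt, so that case returns unchanged).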
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");


  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
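    // x87 path: the input is on the FPU stack. ftst compares ST(0) with
    // zero, and fnstsw_ax/sahf copy the FPU condition codes into EFLAGS so
    // the jcc below can branch on them; rax is saved and restored because
    // fnstsw_ax overwrites ax.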
    __ pushl(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ popl(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorl(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

#ifdef TIERED
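// Slow path taken when a method invocation or backedge counter overflows;
// it passes the current bci to the runtime, which may trigger recompilation
// of the method at a higher tier.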
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ jmp(_continuation);
}
#endif // TIERED



RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = info == NULL ? NULL : new CodeEmitInfo(info);
}


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // pass the array index on stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movl(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
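  // the nofpu variants of the runtime entries skip saving and restoring
  // FPU state around the call, which is safe when the compiled method
  // contains no FPU code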
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

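// Offset from the return address of the patching runtime call back to the
// patch info record: the record is emitted immediately before the call, and
// a call is the same length as a NativeGeneralJump (the assert on
// _patch_info_offset in emit_code below checks this stays true).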
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel and we need to
  // make sure that we don't see a piece of the instruction.  It
  // appears mostly impossible on Intel to simply invalidate other
  // processors' caches, and since they may do aggressive prefetch it's
  // very hard to make a guess about what code might be in the icache.
  // Force the instruction to be double word aligned so that it
  // doesn't span a cache line.
  masm->align(round_to(NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
    address start = __ pc();
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
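    // While the klass is still being initialized, the runtime routes the
    // initializing thread back to being_initialized_entry (the template
    // above). Verify that the executing thread really is the initializing
    // thread; any other thread must take the runtime call below instead.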
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    if (_obj == tmp) tmp = rbx;
    __ pushl(tmp);
    __ get_thread(tmp);
    __ cmpl(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
    __ popl(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

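  // Layout of the 5-byte patch record: the movl opcode (0xB8), a zero filler
  // byte, then the three values the runtime reads back: the distance back to
  // the being_initialized entry, the number of stub bytes to skip, and the
  // number of bytes that were copied out of the patch site.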
  __ a_byte(0xB8);
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

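  // Redirect the original instruction at _pc_start to this stub; until the
  // runtime copies the real code back into place, every thread reaching the
  // patch site jumps here.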
  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, relocInfo::oop_type, relocInfo::none);
  }
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


ArrayStoreExceptionStub::ArrayStoreExceptionStub(CodeEmitInfo* info):
  _info(info) {
}


void ArrayStoreExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_array_store_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the methodOop and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
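  // java_calling_convention assigns each of the five arguments either a
  // register or a stack slot under the calling convention used for compiled
  // Java calls (the last argument marks them as outgoing arguments)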
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dest, destPos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // arguments assigned stack slots by the convention are stored there;
  // the rest are already in the correct registers
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movl(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

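  // Emit a regular static Java call to the arraycopy method: align the call
  // so its displacement can be patched atomically, emit the static call
  // stub, and initially target the resolve blob, which resolves the callee
  // and patches in the real entry point on the first invocation.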
  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ increment(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}


#undef __