annotate src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp @ 1367:9e321dcfa5b7

6940726: Use BIS instruction for allocation prefetch on Sparc
Summary: Use BIS instruction for allocation prefetch on Sparc
Reviewed-by: twisti

author:   kvn
date:     Wed, 07 Apr 2010 12:39:27 -0700
parents:  6476042f815c
children: 0a43776437b6
/*
 * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler_sparc.cpp.incl"

#define __ _masm->


//------------------------------------------------------------


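// Note: simm13 is SPARC's signed 13-bit immediate field; a constant fits
// only if it lies in [-4096, 4095], in which case it can be encoded
// directly into a single arithmetic instruction.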
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  if (opr->is_constant()) {
    LIR_Const* constant = opr->as_constant_ptr();
    switch (constant->type()) {
      case T_INT: {
        jint value = constant->as_jint();
        return Assembler::is_simm13(value);
      }

      default:
        return false;
    }
  }
  return false;
}


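// Used when filling branch delay slots: only LIR ops that are guaranteed
// to emit exactly one machine instruction are safe to move into a delay
// slot (see the delay-slot notes in the lir_move case below).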
bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  switch (op->code()) {
    case lir_null_check:
      return true;


    case lir_add:
    case lir_ushr:
    case lir_shr:
    case lir_shl:
      // integer shifts and adds are always one instruction
      return op->result_opr()->is_single_cpu();


    case lir_move: {
      LIR_Op1* op1 = op->as_Op1();
      LIR_Opr src = op1->in_opr();
      LIR_Opr dst = op1->result_opr();

      if (src == dst) {
        NEEDS_CLEANUP;
        // this works around a problem where moves with the same src and dst
        // end up in the delay slot and then the assembler swallows the mov
        // since it has no effect and then it complains because the delay slot
        // is empty.  returning false stops the optimizer from putting this in
        // the delay slot
        return false;
      }

      // don't put moves involving oops into the delay slot since the VerifyOops code
      // will make it much larger than a single instruction.
      if (VerifyOops) {
        return false;
      }

      if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
          ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
        return false;
      }

      if (dst->is_register()) {
        if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (src->is_single_stack()) {
          return true;
        }
      }

      if (src->is_register()) {
        if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
          return !PatchALot;
        } else if (dst->is_single_stack()) {
          return true;
        }
      }

      if (dst->is_register() &&
          ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
           (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
        return true;
      }

      return false;
    }

    default:
      return false;
  }
  ShouldNotReachHere();
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::O0_oop_opr;
}


LIR_Opr LIR_Assembler::incomingReceiverOpr() {
  return FrameMap::I0_oop_opr;
}
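
// On SPARC the register window rotates at a call: the caller's out register
// O0 becomes the callee's in register I0 after save, which is why the
// receiver is O0 at the call site but I0 on the callee side.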


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::I0_opr;
}


int LIR_Assembler::initial_frame_size_in_bytes() {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// inline cache check: the inline cached class is in G5_inline_cache_reg (G5);
// we fetch the class of the receiver (O0) and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(O0, G5_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation.  The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes());

  // The OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // The locals are a direct copy of the interpreter frame, so the first slot
  // in the locals array is the last local from the interpreter and the last
  // slot is local[0] (the receiver) from the interpreter.
  //
  // Similarly with locks.  The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame; the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if it is a synchronized method).

  // Initialize monitors in the compiled activation.
  //   I0: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
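    // For example, with max_locals = 4 and number_of_locks = 2 (assuming
    // 8-byte words), monitor_offset is 8*4 + 16*1 = 48: lock 0 is read from
    // buffer offsets 48 (lock) and 56 (oop), lock 1 from 32 and 40, so the
    // monitors sit directly above the locals and are walked top-down.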
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // verify the interpreter's monitor has a non-null object
      {
        Label L;
        __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
        __ cmp(G0, O7);
        __ br(Assembler::notEqual, false, Assembler::pt, L);
        __ delayed()->nop();
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      __ ld_ptr(OSR_buf, slot_offset + 0, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
      __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
      __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
    }
  }
}


// Optimized Library calls
// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore, we generate a slow version for OSRs
void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
  Register str0 = left->as_register();
  Register str1 = right->as_register();

  Label Ldone;

  Register result = dst->as_register();
  {
    // Get a pointer to the first character of string0 in tmp0 and get string0.count in str0
    // Get a pointer to the first character of string1 in tmp1 and get string1.count in str1
    // Also, get string0.count-string1.count in o7 and get the condition code set
    // Note: some instructions have been hoisted for better instruction scheduling

    Register tmp0 = L0;
    Register tmp1 = L1;
    Register tmp2 = L2;

    int  value_offset = java_lang_String:: value_offset_in_bytes(); // char array
    int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
    int  count_offset = java_lang_String:: count_offset_in_bytes();

    __ ld_ptr(str0, value_offset, tmp0);
    __ ld(str0, offset_offset, tmp2);
    __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
    __ ld(str0, count_offset, str0);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);

    // str1 may be null
    add_debug_info_for_null_check_here(info);

    __ ld_ptr(str1, value_offset, tmp1);
    __ add(tmp0, tmp2, tmp0);

    __ ld(str1, offset_offset, tmp2);
    __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
    __ ld(str1, count_offset, str1);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
    __ subcc(str0, str1, O7);
    __ add(tmp1, tmp2, tmp1);
  }

  {
    // Compute the minimum of the string lengths, scale it and store it in limit
    Register count0 = I0;
    Register count1 = I1;
    Register limit  = L3;

    Label Lskip;
    __ sll(count0, exact_log2(sizeof(jchar)), limit);             // string0 is shorter
    __ br(Assembler::greater, true, Assembler::pt, Lskip);
    __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit);  // string1 is shorter
    __ bind(Lskip);

    // If either string is empty (or both of them) the result is the difference in lengths
    __ cmp(limit, 0);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(O7, result);  // result is difference in lengths
  }

  {
    // Neither string is empty
    Label Lloop;

    Register base0 = L0;
    Register base1 = L1;
    Register chr0  = I0;
    Register chr1  = I1;
    Register limit = L3;

    // Shift base0 and base1 to the end of the arrays, negate limit
    __ add(base0, limit, base0);
    __ add(base1, limit, base1);
    __ neg(limit);  // limit = -min{string0.count, string1.count}
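    // With base0/base1 now pointing one past the last character to compare
    // and limit negative, lduh(base, limit) addresses the arrays from the
    // front; inccc steps limit by sizeof(jchar) toward zero and sets the
    // condition codes, so reaching zero terminates the loop without an
    // extra cmp.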

    __ lduh(base0, limit, chr0);
    __ bind(Lloop);
    __ lduh(base1, limit, chr1);
    __ subcc(chr0, chr1, chr0);
    __ br(Assembler::notZero, false, Assembler::pn, Ldone);
    assert(chr0 == result, "result must be pre-placed");
    __ delayed()->inccc(limit, sizeof(jchar));
    __ br(Assembler::notZero, true, Assembler::pt, Lloop);
    __ delayed()->lduh(base0, limit, chr0);
  }

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
}


// --------------------------------------------------------------------------------------------

void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;

  Register obj_reg = obj_opr->as_register();
  Register lock_reg = lock_opr->as_register();

  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // compute pointer to BasicLock
  if (mon_addr.is_simm13()) {
    __ add(reg, offset, lock_reg);
  } else {
    __ set(offset, lock_reg);
    __ add(reg, lock_reg, lock_reg);
  }
  // unlock object
  MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
  // _slow_case_stubs->append(slow_case);
  // temporary fix: must be created after the exception handler, therefore as call stub
  _slow_case_stubs->append(slow_case);
  if (UseFastLocking) {
    // try inlined fast unlocking first, revert to slow locking if it fails
    // note: lock_reg points to the displaced header since the displaced header offset is 0!
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
  } else {
    // always do slow unlocking
    // note: the slow unlocking code could be inlined here, however if we use
    //       slow unlocking, speed doesn't matter anyway and this solution is
    //       simpler and requires less duplicated code - additionally, the
    //       slow unlocking code is the same in either case which simplifies
    //       debugging
    __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
    __ delayed()->nop();
  }
  // done
  __ bind(*slow_case->continuation());
}


int LIR_Assembler::emit_exception_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for exception handler
  ciMethod* method = compilation()->method();

  address handler_base = __ start_a_stub(exception_handler_size);

  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  debug_only(__ stop("should have gone to the caller");)
  assert(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // if the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri)
  __ nop();

  // generate code for deopt handler
  ciMethod* method = compilation()->method();
  address handler_base = __ start_a_stub(deopt_handler_size);
  if (handler_base == NULL) {
    // not enough space left for the handler
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
  __ delayed()->nop();
  assert(code_offset() - offset <= deopt_handler_size, "overflow");
  debug_only(__ stop("should have gone to the caller");)
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ set(NULL_WORD, reg);
  } else {
    int oop_index = __ oop_recorder()->find_index(o);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);
    __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in oop table to hold the oop once it's been patched
  int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);

  AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as placeholders.
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = op->result_opr()->as_register();
  int divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
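    // The bias in Rscratch makes the arithmetic shift (which rounds toward
    // minus infinity) match Java's round-toward-zero division.  E.g. for
    // divisor = 8 and Rdividend = -13: sra gives -1, and3 with 7 gives a
    // bias of 7, -13 + 7 = -6, and -6 >> 3 = -1 = -13 / 8 (an unbiased
    // shift would give -2).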
    if (op->code() == lir_idiv) {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_intptr(divisor), Rresult);
      return;
    } else {
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1, Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);
  if (!VM_Version::v9_instructions_work()) {
    // v9 doesn't require these nops
    __ nop();
    __ nop();
    __ nop();
    __ nop();
  }

  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

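  // The only 32-bit division that sets the overflow flag is min_int / -1,
  // whose Java result is min_int itself; the annulled sethi(0x80000000) in
  // the delay slot below materializes exactly that value when the branch
  // is taken.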
  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:         acond = Assembler::f_equal;    break;
      case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
      case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
      case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
      case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
      case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default :                    ShouldNotReachHere();
    };

    if (!VM_Version::v9_instructions_work()) {
      __ nop();
    }
    __ fb( acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::equal;                break;
      case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
      case lir_cond_less:         acond = Assembler::less;                 break;
      case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
      case lir_cond_greater:      acond = Assembler::greater;              break;
      case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
      default:                    ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values.  We could always test xcc if we could
    // guarantee that 32-bit loads always sign extended but that isn't
    // true and since sign extension isn't free, it would impose a
    // slight cost.
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      Register rlo  = dst->as_register_lo();
      Register rhi  = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i: {
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind (L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo  = op->in_opr()->as_register_lo();
      Register rhi  = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll(rval, shift, rdst);
      __ sra(rdst, shift, rdst);
      break;
    }
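    // The shift pairs truncate to the narrow type and re-extend: sll/sra
    // above sign-extends for _i2b/_i2s (e.g. for _i2b, 0x180 << 24 =
    // 0x80000000, then >> 24 yields 0xFFFFFF80 = -128), while sll/srl
    // below zero-extends for _i2c because char is unsigned.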
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll(rval, shift, rdst);
      __ srl(rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // the peephole pass fills the delay slot
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
  __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
  __ relocate(rspec);
  __ call(op->addr(), relocInfo::none);
  // the peephole pass fills the delay slot
}
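
// Note: Universe::non_oop_word() is a sentinel that no real oop can equal;
// loading it into G5_inline_cache_reg marks the inline cache as unresolved
// until the runtime patches in the actual receiver class.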


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  add_debug_info_for_null_check_here(op->info());
  __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
  if (__ is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}


void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
  Unimplemented();
}


void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
  Unimplemented();
}


// load with 32-bit displacement
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
  int load_offset = code_offset();
  if (Assembler::is_simm13(disp)) {
    if (info != NULL) add_debug_info_for_null_check_here(info);
    switch(ld_type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(s, disp, d); break;
      case T_CHAR  : __ lduh(s, disp, d); break;
      case T_SHORT : __ ldsh(s, disp, d); break;
      case T_INT   : __ ld(s, disp, d); break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ ld_ptr(s, disp, d); break;
      default      : ShouldNotReachHere();
    }
  } else {
    __ set(disp, O7);
    if (info != NULL) add_debug_info_for_null_check_here(info);
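    // record the offset again: here the faulting instruction is the load
    // itself, not the preceding set, so the implicit null check's debug
    // info must map to the load's PC.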
    load_offset = code_offset();
    switch(ld_type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(s, O7, d); break;
      case T_CHAR  : __ lduh(s, O7, d); break;
      case T_SHORT : __ ldsh(s, O7, d); break;
      case T_INT   : __ ld(s, O7, d); break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ ld_ptr(s, O7, d); break;
      default      : ShouldNotReachHere();
    }
  }
  if (ld_type == T_ARRAY || ld_type == T_OBJECT) __ verify_oop(d);
  return load_offset;
}


// store with 32-bit displacement
void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
  if (Assembler::is_simm13(offset)) {
    if (info != NULL) add_debug_info_for_null_check_here(info);
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(value, base, offset); break;
      case T_CHAR  : __ sth(value, base, offset); break;
      case T_SHORT : __ sth(value, base, offset); break;
      case T_INT   : __ stw(value, base, offset); break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ st_ptr(value, base, offset); break;
      default      : ShouldNotReachHere();
    }
  } else {
    __ set(offset, O7);
    if (info != NULL) add_debug_info_for_null_check_here(info);
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(value, base, O7); break;
      case T_CHAR  : __ sth(value, base, O7); break;
      case T_SHORT : __ sth(value, base, O7); break;
      case T_INT   : __ stw(value, base, O7); break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ st_ptr(value, base, O7); break;
      default      : ShouldNotReachHere();
    }
  }
  // Note: Do the store before verification as the code might be patched!
  if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(value);
}


// load float with 32-bit displacement
void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
  FloatRegisterImpl::Width w;
  switch(ld_type) {
    case T_FLOAT : w = FloatRegisterImpl::S; break;
    case T_DOUBLE: w = FloatRegisterImpl::D; break;
    default      : ShouldNotReachHere();
  }

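  // SPARC double-word FP loads require 8-byte alignment, so a T_DOUBLE at a
  // displacement that is only 4-byte aligned is loaded as two 32-bit halves
  // into the even/odd register pair (d and d->successor()).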
  if (Assembler::is_simm13(disp)) {
    if (info != NULL) add_debug_info_for_null_check_here(info);
    if (disp % BytesPerLong != 0 && w == FloatRegisterImpl::D) {
      __ ldf(FloatRegisterImpl::S, s, disp + BytesPerWord, d->successor());
      __ ldf(FloatRegisterImpl::S, s, disp               , d);
    } else {
      __ ldf(w, s, disp, d);
    }
  } else {
    __ set(disp, O7);
    if (info != NULL) add_debug_info_for_null_check_here(info);
    __ ldf(w, s, O7, d);
  }
}


// store float with 32-bit displacement
void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
  FloatRegisterImpl::Width w;
  switch(type) {
    case T_FLOAT : w = FloatRegisterImpl::S; break;
    case T_DOUBLE: w = FloatRegisterImpl::D; break;
    default      : ShouldNotReachHere();
  }

  if (Assembler::is_simm13(offset)) {
    if (info != NULL) add_debug_info_for_null_check_here(info);
    if (w == FloatRegisterImpl::D && offset % BytesPerLong != 0) {
      __ stf(FloatRegisterImpl::S, value->successor(), base, offset + BytesPerWord);
      __ stf(FloatRegisterImpl::S, value             , base, offset);
    } else {
      __ stf(w, value, base, offset);
    }
  } else {
    __ set(offset, O7);
    if (info != NULL) add_debug_info_for_null_check_here(info);
    __ stf(w, value, O7, base);
  }
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we setup the offset in O7
    __ set(offset, O7);
    store_offset = store(from_reg, base, O7, type);
  } else {
    if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
      case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
      case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
      case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
      case T_LONG  :
#ifdef _LP64
        if (unaligned || PatchALot) {
          __ srax(from_reg->as_register_lo(), 32, O7);
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(O7,                         base, offset + hi_word_offset_in_bytes);
        } else {
          __ stx(from_reg->as_register_lo(), base, offset);
        }
#else
        assert(Assembler::is_simm13(offset + 4), "must be");
        __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
        __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
        break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ st_ptr(from_reg->as_register(), base, offset); break;
      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
      case T_DOUBLE:
        {
          FloatRegister reg = from_reg->as_double_reg();
          // split unaligned stores
          if (unaligned || PatchALot) {
            assert(Assembler::is_simm13(offset + 4), "must be");
            __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
            __ stf(FloatRegisterImpl::S, reg,              base, offset);
          } else {
            __ stf(FloatRegisterImpl::D, reg, base, offset);
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type) {
  if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:// fall through
    case T_ARRAY : // fall through
    case T_OBJECT: __ st_ptr(from_reg->as_register(), base, disp); break;
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm13(offset + ((type == T_LONG) ? wordSize : 0))) {
    assert(base != O7, "destroying register");
    assert(!unaligned, "can't handle this");
    // for offsets larger than a simm13 we setup the offset in O7
    __ set(offset, O7);
    load_offset = load(base, O7, to_reg, type);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
      case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
      case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
      case T_LONG  :
        if (!unaligned) {
#ifdef _LP64
          __ ldx(base, offset, to_reg->as_register_lo());
#else
          assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
                 "must be sequential");
          __ ldd(base, offset, to_reg->as_register_hi());
#endif
        } else {
#ifdef _LP64
          assert(base != to_reg->as_register_lo(), "can't handle this");
          assert(O7 != to_reg->as_register_lo(), "can't handle this");
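          // assemble the 64-bit value from two 32-bit loads: the high word
          // is loaded first and shifted into bits 63..32, then the
          // zero-extended low word (lduw) is or'ed into bits 31..0.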
          __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
          __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
          __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
          __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
          if (base == to_reg->as_register_lo()) {
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
          } else {
            __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          }
#endif
        }
        break;
      case T_ADDRESS:// fall through
      case T_ARRAY : // fall through
      case T_OBJECT: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_FLOAT : __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
          if (unaligned || PatchALot) {
            __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
            __ ldf(FloatRegisterImpl::S, base, offset,     reg);
          } else {
            __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
          }
          break;
        }
      default      : ShouldNotReachHere();
    }
    if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS:// fall through
    case T_ARRAY : // fall through
    case T_OBJECT: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_FLOAT : __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default      : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
  return load_offset;
}


// load/store with an Address
void LIR_Assembler::load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo *info, int offset) {
  load(a.base(), a.disp() + offset, d, ld_type, info);
}


void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
  store(value, dest.base(), dest.disp() + offset, type, info);
}


// loadf/storef with an Address
void LIR_Assembler::load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) {
  load(a.base(), a.disp() + offset, d, ld_type, info);
}


void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
  store(value, dest.base(), dest.disp() + offset, type, info);
}


// load/store with an Address
void LIR_Assembler::load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo *info) {
  load(as_Address(a), d, ld_type, info);
}


void LIR_Assembler::store(Register value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
  store(value, as_Address(dest), type, info);
}


// loadf/storef with an Address
void LIR_Assembler::load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
  load(as_Address(a), d, ld_type, info);
}


void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
  store(value, as_Address(dest), type, info);
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT:
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
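      // G0 always reads as zero on SPARC, so a zero constant can be stored
      // straight from G0 without materializing it in a scratch register.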
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();

  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }
  switch (c->type()) {
    case T_INT:
    case T_FLOAT:
    case T_ADDRESS: {
      LIR_Opr tmp = FrameMap::O7_opr;
      int value = c->as_jint_bits();
      if (value == 0) {
        tmp = FrameMap::G0_opr;
      } else if (Assembler::is_simm13(value)) {
        __ set(value, O7);
      }
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        store(tmp, base, addr->index()->as_pointer_register(), type);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        store(tmp, base, addr->disp(), type);
      }
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      assert(!addr->index()->is_valid(), "can't handle reg reg address here");
      assert(Assembler::is_simm13(addr->disp()) &&
             Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        __ set(value_hi, O7);
      }
      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT);
      break;
    }
    case T_OBJECT: {
      jobject obj = c->as_jobject();
      LIR_Opr tmp;
      if (obj == NULL) {
        tmp = FrameMap::G0_opr;
      } else {
        tmp = FrameMap::O7_opr;
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
        store(tmp, base, addr->index()->as_pointer_register(), type);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
        store(tmp, base, addr->disp(), type);
      }

      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
#ifdef _LP64
          __ set(con, to_reg->as_register_lo());
#else
          __ set(low(con),  to_reg->as_register_lo());
          __ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
#endif
        } else {
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, ((frame::register_save_words) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con),  O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
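          // the usual SPARC high/low pair: patchable_sethi materializes the
          // upper bits of the constant-table address in O7 and low10()
          // supplies the remaining low bits as the load's immediate.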
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          __ set(const_addrlit, O7);
          load(O7, 0, to_reg->as_register(), T_INT);
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
#ifdef _LP64
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
          __ set(low(jlong_cast(c->as_jdouble())),  to_reg->as_register_lo());
          __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Register reg = addr->base()->as_register();
  return Address(reg, addr->disp());
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      break;
    }
    case T_OBJECT: {
      Register tmp = O7;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld_ptr(from.base(), from.disp(), tmp);
      __ st_ptr(tmp, to.base(), to.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = O7;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ lduw(from.base(), from.disp(), tmp);
      __ stw(tmp, to.base(), to.disp());
      __ lduw(from.base(), from.disp() + 4, tmp);
      __ stw(tmp, to.base(), to.disp() + 4);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Address base = as_Address(addr);
  return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
}
1370 | |
1371 | |
1372 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, | |
1373 LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) { | |
1374 | |
1375 LIR_Address* addr = src_opr->as_address_ptr(); | |
1376 LIR_Opr to_reg = dest; | |
1377 | |
1378 Register src = addr->base()->as_pointer_register(); | |
1379 Register disp_reg = noreg; | |
1380 int disp_value = addr->disp(); | |
1381 bool needs_patching = (patch_code != lir_patch_none); | |
1382 | |
1383 if (addr->base()->type() == T_OBJECT) { | |
1384 __ verify_oop(src); | |
1385 } | |
1386 | |
1387 PatchingStub* patch = NULL; | |
1388 if (needs_patching) { | |
1389 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1390 assert(!to_reg->is_double_cpu() || | |
1391 patch_code == lir_patch_none || | |
1392 patch_code == lir_patch_normal, "patching doesn't match register"); | |
1393 } | |
1394 | |
1395 if (addr->index()->is_illegal()) { | |
1396 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { | |
1397 if (needs_patching) { | |
727 | 1398 __ patchable_set(0, O7); |
0 | 1399 } else { |
1400 __ set(disp_value, O7); | |
1401 } | |
1402 disp_reg = O7; | |
1403 } | |
1404 } else if (unaligned || PatchALot) { | |
1405 __ add(src, addr->index()->as_register(), O7); | |
1406 src = O7; | |
1407 } else { | |
1408 disp_reg = addr->index()->as_pointer_register(); | |
1409 assert(disp_value == 0, "can't handle 3 operand addresses"); | |
1410 } | |
1411 | |
1412 // remember the offset of the load. The patching_epilog must be done | |
1413 // before the call to add_debug_info, otherwise the PcDescs don't get | |
1414 // entered in increasing order. | |
1415 int offset = code_offset(); | |
1416 | |
1417 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); | |
1418 if (disp_reg == noreg) { | |
1419 offset = load(src, disp_value, to_reg, type, unaligned); | |
1420 } else { | |
1421 assert(!unaligned, "can't handle this"); | |
1422 offset = load(src, disp_reg, to_reg, type); | |
1423 } | |
1424 | |
1425 if (patch != NULL) { | |
1426 patching_epilog(patch, patch_code, src, info); | |
1427 } | |
1428 | |
1429 if (info != NULL) add_debug_info_for_null_check(offset, info); | |
1430 } | |
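// A note on the displacement handling above: SPARC load instructions
// encode either reg + simm13 (a 13-bit signed immediate, -4096..4095)
// or reg + reg. A displacement such as 0x12345 cannot be encoded
// directly, so it is first materialized into O7 (via set() or, when the
// field offset is not yet known, patchable_set()) and the reg + reg
// form is used.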
1431 | |
1432 | |
1433 void LIR_Assembler::prefetchr(LIR_Opr src) { | |
1434 LIR_Address* addr = src->as_address_ptr(); | |
1435 Address from_addr = as_Address(addr); | |
1436 | |
1437 if (VM_Version::has_v9()) { | |
1438 __ prefetch(from_addr, Assembler::severalReads); | |
1439 } | |
1440 } | |
1441 | |
1442 | |
1443 void LIR_Assembler::prefetchw(LIR_Opr src) { | |
1444 LIR_Address* addr = src->as_address_ptr(); | |
1445 Address from_addr = as_Address(addr); | |
1446 | |
1447 if (VM_Version::has_v9()) { | |
1448 __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads); | |
1449 } | |
1450 } | |
1451 | |
1452 | |
1453 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { | |
1454 Address addr; | |
1455 if (src->is_single_word()) { | |
1456 addr = frame_map()->address_for_slot(src->single_stack_ix()); | |
1457 } else if (src->is_double_word()) { | |
1458 addr = frame_map()->address_for_double_slot(src->double_stack_ix()); | |
1459 } | |
1460 | |
1461 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; | |
1462 load(addr.base(), addr.disp(), dest, dest->type(), unaligned); | |
1463 } | |
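// Example of the unaligned test: on 64-bit SPARC, STACK_BIAS is 2047,
// so a biased displacement of 2047 + 12 sits 4 bytes past an 8-byte
// boundary and the double-word slot has to be transferred as two
// 32-bit halves rather than with a single ldd/std.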
1464 | |
1465 | |
1466 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { | |
1467 Address addr; | |
1468 if (dest->is_single_word()) { | |
1469 addr = frame_map()->address_for_slot(dest->single_stack_ix()); | |
1470 } else if (dest->is_double_word()) { | |
1471 addr = frame_map()->address_for_slot(dest->double_stack_ix()); | |
1472 } | |
1473 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; | |
1474 store(from_reg, addr.base(), addr.disp(), from_reg->type(), unaligned); | |
1475 } | |
1476 | |
1477 | |
1478 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) { | |
1479 if (from_reg->is_float_kind() && to_reg->is_float_kind()) { | |
1480 if (from_reg->is_double_fpu()) { | |
1481 // double to double moves | |
1482 assert(to_reg->is_double_fpu(), "should match"); | |
1483 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg()); | |
1484 } else { | |
1485 // float to float moves | |
1486 assert(to_reg->is_single_fpu(), "should match"); | |
1487 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg()); | |
1488 } | |
1489 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) { | |
1490 if (from_reg->is_double_cpu()) { | |
1491 #ifdef _LP64 | |
1492 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register()); | |
1493 #else | |
1494 assert(to_reg->is_double_cpu() && | |
1495 from_reg->as_register_hi() != to_reg->as_register_lo() && | |
1496 from_reg->as_register_lo() != to_reg->as_register_hi(), | |
1497 "should both be long and not overlap"); | |
1498 // long to long moves | |
1499 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi()); | |
1500 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo()); | |
1501 #endif | |
1502 #ifdef _LP64 | |
1503 } else if (to_reg->is_double_cpu()) { | |
1504 // int to int moves | |
1505 __ mov(from_reg->as_register(), to_reg->as_register_lo()); | |
1506 #endif | |
1507 } else { | |
1508 // int to int moves | |
1509 __ mov(from_reg->as_register(), to_reg->as_register()); | |
1510 } | |
1511 } else { | |
1512 ShouldNotReachHere(); | |
1513 } | |
1514 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) { | |
1515 __ verify_oop(to_reg->as_register()); | |
1516 } | |
1517 } | |
1518 | |
1519 | |
1520 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type, | |
1521 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, | |
1522 bool unaligned) { | |
1523 LIR_Address* addr = dest->as_address_ptr(); | |
1524 | |
1525 Register src = addr->base()->as_pointer_register(); | |
1526 Register disp_reg = noreg; | |
1527 int disp_value = addr->disp(); | |
1528 bool needs_patching = (patch_code != lir_patch_none); | |
1529 | |
1530 if (addr->base()->is_oop_register()) { | |
1531 __ verify_oop(src); | |
1532 } | |
1533 | |
1534 PatchingStub* patch = NULL; | |
1535 if (needs_patching) { | |
1536 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1537 assert(!from_reg->is_double_cpu() || | |
1538 patch_code == lir_patch_none || | |
1539 patch_code == lir_patch_normal, "patching doesn't match register"); | |
1540 } | |
1541 | |
1542 if (addr->index()->is_illegal()) { | |
1543 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { | |
1544 if (needs_patching) { | |
727 | 1545 __ patchable_set(0, O7); |
0 | 1546 } else { |
1547 __ set(disp_value, O7); | |
1548 } | |
1549 disp_reg = O7; | |
1550 } | |
1551 } else if (unaligned || PatchALot) { | |
1552 __ add(src, addr->index()->as_register(), O7); | |
1553 src = O7; | |
1554 } else { | |
1555 disp_reg = addr->index()->as_pointer_register(); | |
1556 assert(disp_value == 0, "can't handle 3 operand addresses"); | |
1557 } | |
1558 | |
1559 // remember the offset of the store. The patching_epilog must be done | |
1560 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get | |
1561 // entered in increasing order. | |
1562 int offset; | |
1563 | |
1564 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); | |
1565 if (disp_reg == noreg) { | |
1566 offset = store(from_reg, src, disp_value, type, unaligned); | |
1567 } else { | |
1568 assert(!unaligned, "can't handle this"); | |
1569 offset = store(from_reg, src, disp_reg, type); | |
1570 } | |
1571 | |
1572 if (patch != NULL) { | |
1573 patching_epilog(patch, patch_code, src, info); | |
1574 } | |
1575 | |
1576 if (info != NULL) add_debug_info_for_null_check(offset, info); | |
1577 } | |
1578 | |
1579 | |
1580 void LIR_Assembler::return_op(LIR_Opr result) { | |
1581 // the poll may need a register so just pick one that isn't the return register | |
1582 #ifdef TIERED | |
1583 if (result->type_field() == LIR_OprDesc::long_type) { | |
1584 // Must move the result to G1 | |
1585 // Must leave proper result in O0,O1 and G1 (TIERED only) | |
1586 __ sllx(I0, 32, G1); // Shift bits into high G1 | |
1587 __ srl (I1, 0, I1); // Zero extend I1 (O1 after the restore; harmless?) | |
1588 __ or3 (I1, G1, G1); // OR 64 bits into G1 | |
1589 } | |
1590 #endif // TIERED | |
1591 __ set((intptr_t)os::get_polling_page(), L0); | |
1592 __ relocate(relocInfo::poll_return_type); | |
1593 __ ld_ptr(L0, 0, G0); | |
1594 __ ret(); | |
1595 __ delayed()->restore(); | |
1596 } | |
1597 | |
1598 | |
1599 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { | |
1600 __ set((intptr_t)os::get_polling_page(), tmp->as_register()); | |
1601 if (info != NULL) { | |
1602 add_debug_info_for_branch(info); | |
1603 } else { | |
1604 __ relocate(relocInfo::poll_type); | |
1605 } | |
1606 | |
1607 int offset = __ offset(); | |
1608 __ ld_ptr(tmp->as_register(), 0, G0); | |
1609 | |
1610 return offset; | |
1611 } | |
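// Both return_op and safepoint_poll above poll by loading one word from
// the VM's polling page into G0, i.e. the value is discarded. To request
// a safepoint the VM protects that page, so the next poll faults and the
// signal handler brings the thread to a halt.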
1612 | |
1613 | |
1614 void LIR_Assembler::emit_static_call_stub() { | |
1615 address call_pc = __ pc(); | |
1616 address stub = __ start_a_stub(call_stub_size); | |
1617 if (stub == NULL) { | |
1618 bailout("static call stub overflow"); | |
1619 return; | |
1620 } | |
1621 | |
1622 int start = __ offset(); | |
1623 __ relocate(static_stub_Relocation::spec(call_pc)); | |
1624 | |
1625 __ set_oop(NULL, G5); | |
1626 // must be set to -1 at code generation time | |
727 | 1627 AddressLiteral addrlit(-1); |
1628 __ jump_to(addrlit, G3); | |
0 | 1629 __ delayed()->nop(); |
1630 | |
1631 assert(__ offset() - start <= call_stub_size, "stub too big"); | |
1632 __ end_a_stub(); | |
1633 } | |
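// The stub emitted above is a template: set_oop(NULL) reserves the
// instruction sequence that will later hold the callee's methodOop, and
// the jump through the -1 AddressLiteral reserves the branch target.
// Both are rewritten when the call site is resolved, so only their
// encoded size matters at this point.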
1634 | |
1635 | |
1636 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { | |
1637 if (opr1->is_single_fpu()) { | |
1638 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg()); | |
1639 } else if (opr1->is_double_fpu()) { | |
1640 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg()); | |
1641 } else if (opr1->is_single_cpu()) { | |
1642 if (opr2->is_constant()) { | |
1643 switch (opr2->as_constant_ptr()->type()) { | |
1644 case T_INT: | |
1645 { jint con = opr2->as_constant_ptr()->as_jint(); | |
1646 if (Assembler::is_simm13(con)) { | |
1647 __ cmp(opr1->as_register(), con); | |
1648 } else { | |
1649 __ set(con, O7); | |
1650 __ cmp(opr1->as_register(), O7); | |
1651 } | |
1652 } | |
1653 break; | |
1654 | |
1655 case T_OBJECT: | |
1656 // there are only equal/notequal comparisons on objects | |
1657 { jobject con = opr2->as_constant_ptr()->as_jobject(); | |
1658 if (con == NULL) { | |
1659 __ cmp(opr1->as_register(), 0); | |
1660 } else { | |
1661 jobject2reg(con, O7); | |
1662 __ cmp(opr1->as_register(), O7); | |
1663 } | |
1664 } | |
1665 break; | |
1666 | |
1667 default: | |
1668 ShouldNotReachHere(); | |
1669 break; | |
1670 } | |
1671 } else { | |
1672 if (opr2->is_address()) { | |
1673 LIR_Address * addr = opr2->as_address_ptr(); | |
1674 BasicType type = addr->type(); | |
1675 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7); | |
1676 else __ ld(as_Address(addr), O7); | |
1677 __ cmp(opr1->as_register(), O7); | |
1678 } else { | |
1679 __ cmp(opr1->as_register(), opr2->as_register()); | |
1680 } | |
1681 } | |
1682 } else if (opr1->is_double_cpu()) { | |
1683 Register xlo = opr1->as_register_lo(); | |
1684 Register xhi = opr1->as_register_hi(); | |
1685 if (opr2->is_constant() && opr2->as_jlong() == 0) { | |
1686 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases"); | |
1687 #ifdef _LP64 | |
1688 __ orcc(xhi, G0, G0); | |
1689 #else | |
1690 __ orcc(xhi, xlo, G0); | |
1691 #endif | |
1692 } else if (opr2->is_register()) { | |
1693 Register ylo = opr2->as_register_lo(); | |
1694 Register yhi = opr2->as_register_hi(); | |
1695 #ifdef _LP64 | |
1696 __ cmp(xlo, ylo); | |
1697 #else | |
1698 __ subcc(xlo, ylo, xlo); | |
1699 __ subccc(xhi, yhi, xhi); | |
1700 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { | |
1701 __ orcc(xhi, xlo, G0); | |
1702 } | |
1703 #endif | |
1704 } else { | |
1705 ShouldNotReachHere(); | |
1706 } | |
1707 } else if (opr1->is_address()) { | |
1708 LIR_Address * addr = opr1->as_address_ptr(); | |
1709 BasicType type = addr->type(); | |
1710 assert (opr2->is_constant(), "Checking"); | |
1711 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7); | |
1712 else __ ld(as_Address(addr), O7); | |
1713 __ cmp(O7, opr2->as_constant_ptr()->as_jint()); | |
1714 } else { | |
1715 ShouldNotReachHere(); | |
1716 } | |
1717 } | |
1718 | |
1719 | |
1720 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){ | |
1721 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { | |
1722 bool is_unordered_less = (code == lir_ucmp_fd2i); | |
1723 if (left->is_single_fpu()) { | |
1724 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register()); | |
1725 } else if (left->is_double_fpu()) { | |
1726 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register()); | |
1727 } else { | |
1728 ShouldNotReachHere(); | |
1729 } | |
1730 } else if (code == lir_cmp_l2i) { | |
1731 __ lcmp(left->as_register_hi(), left->as_register_lo(), | |
1732 right->as_register_hi(), right->as_register_lo(), | |
1733 dst->as_register()); | |
1734 } else { | |
1735 ShouldNotReachHere(); | |
1736 } | |
1737 } | |
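// The unordered_result argument to float_cmp (-1 or 1) picks the Java
// result for unordered (NaN) operands: lir_ucmp_fd2i passes -1, matching
// Java's fcmpl/dcmpl, while lir_cmp_fd2i passes +1, matching
// fcmpg/dcmpg.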
1738 | |
1739 | |
1740 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) { | |
1741 | |
1742 Assembler::Condition acond; | |
1743 switch (condition) { | |
1744 case lir_cond_equal: acond = Assembler::equal; break; | |
1745 case lir_cond_notEqual: acond = Assembler::notEqual; break; | |
1746 case lir_cond_less: acond = Assembler::less; break; | |
1747 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; | |
1748 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break; | |
1749 case lir_cond_greater: acond = Assembler::greater; break; | |
1750 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break; | |
1751 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break; | |
1752 default: ShouldNotReachHere(); | |
1753 }; | |
1754 | |
1755 if (opr1->is_constant() && opr1->type() == T_INT) { | |
1756 Register dest = result->as_register(); | |
1757 // load up first part of constant before branch | |
1758 // and do the rest in the delay slot. | |
1759 if (!Assembler::is_simm13(opr1->as_jint())) { | |
1760 __ sethi(opr1->as_jint(), dest); | |
1761 } | |
1762 } else if (opr1->is_constant()) { | |
1763 const2reg(opr1, result, lir_patch_none, NULL); | |
1764 } else if (opr1->is_register()) { | |
1765 reg2reg(opr1, result); | |
1766 } else if (opr1->is_stack()) { | |
1767 stack2reg(opr1, result, result->type()); | |
1768 } else { | |
1769 ShouldNotReachHere(); | |
1770 } | |
1771 Label skip; | |
1772 __ br(acond, false, Assembler::pt, skip); | |
1773 if (opr1->is_constant() && opr1->type() == T_INT) { | |
1774 Register dest = result->as_register(); | |
1775 if (Assembler::is_simm13(opr1->as_jint())) { | |
1776 __ delayed()->or3(G0, opr1->as_jint(), dest); | |
1777 } else { | |
1778 // the sethi has been done above, so just put in the low 10 bits | |
1779 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest); | |
1780 } | |
1781 } else { | |
1782 // can't do anything useful in the delay slot | |
1783 __ delayed()->nop(); | |
1784 } | |
1785 if (opr2->is_constant()) { | |
1786 const2reg(opr2, result, lir_patch_none, NULL); | |
1787 } else if (opr2->is_register()) { | |
1788 reg2reg(opr2, result); | |
1789 } else if (opr2->is_stack()) { | |
1790 stack2reg(opr2, result, result->type()); | |
1791 } else { | |
1792 ShouldNotReachHere(); | |
1793 } | |
1794 __ bind(skip); | |
1795 } | |
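// The constant case above exploits the branch delay slot: for a value
// such as 0x12345678, sethi loads the upper 22 bits (dest becomes
// 0x12345400) before the branch and the delayed or3 adds the low 10
// bits (0x278). The delay slot runs on both paths; on fall-through the
// result is simply overwritten by the opr2 move.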
1796 | |
1797 | |
1798 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { | |
1799 assert(info == NULL, "unused on this code path"); | |
1800 assert(left->is_register(), "wrong items state"); | |
1801 assert(dest->is_register(), "wrong items state"); | |
1802 | |
1803 if (right->is_register()) { | |
1804 if (dest->is_float_kind()) { | |
1805 | |
1806 FloatRegister lreg, rreg, res; | |
1807 FloatRegisterImpl::Width w; | |
1808 if (right->is_single_fpu()) { | |
1809 w = FloatRegisterImpl::S; | |
1810 lreg = left->as_float_reg(); | |
1811 rreg = right->as_float_reg(); | |
1812 res = dest->as_float_reg(); | |
1813 } else { | |
1814 w = FloatRegisterImpl::D; | |
1815 lreg = left->as_double_reg(); | |
1816 rreg = right->as_double_reg(); | |
1817 res = dest->as_double_reg(); | |
1818 } | |
1819 | |
1820 switch (code) { | |
1821 case lir_add: __ fadd(w, lreg, rreg, res); break; | |
1822 case lir_sub: __ fsub(w, lreg, rreg, res); break; | |
1823 case lir_mul: // fall through | |
1824 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break; | |
1825 case lir_div: // fall through | |
1826 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break; | |
1827 default: ShouldNotReachHere(); | |
1828 } | |
1829 | |
1830 } else if (dest->is_double_cpu()) { | |
1831 #ifdef _LP64 | |
1832 Register dst_lo = dest->as_register_lo(); | |
1833 Register op1_lo = left->as_pointer_register(); | |
1834 Register op2_lo = right->as_pointer_register(); | |
1835 | |
1836 switch (code) { | |
1837 case lir_add: | |
1838 __ add(op1_lo, op2_lo, dst_lo); | |
1839 break; | |
1840 | |
1841 case lir_sub: | |
1842 __ sub(op1_lo, op2_lo, dst_lo); | |
1843 break; | |
1844 | |
1845 default: ShouldNotReachHere(); | |
1846 } | |
1847 #else | |
1848 Register op1_lo = left->as_register_lo(); | |
1849 Register op1_hi = left->as_register_hi(); | |
1850 Register op2_lo = right->as_register_lo(); | |
1851 Register op2_hi = right->as_register_hi(); | |
1852 Register dst_lo = dest->as_register_lo(); | |
1853 Register dst_hi = dest->as_register_hi(); | |
1854 | |
1855 switch (code) { | |
1856 case lir_add: | |
1857 __ addcc(op1_lo, op2_lo, dst_lo); | |
1858 __ addc (op1_hi, op2_hi, dst_hi); | |
1859 break; | |
1860 | |
1861 case lir_sub: | |
1862 __ subcc(op1_lo, op2_lo, dst_lo); | |
1863 __ subc (op1_hi, op2_hi, dst_hi); | |
1864 break; | |
1865 | |
1866 default: ShouldNotReachHere(); | |
1867 } | |
1868 #endif | |
1869 } else { | |
1870 assert (right->is_single_cpu(), "Just Checking"); | |
1871 | |
1872 Register lreg = left->as_register(); | |
1873 Register res = dest->as_register(); | |
1874 Register rreg = right->as_register(); | |
1875 switch (code) { | |
1876 case lir_add: __ add (lreg, rreg, res); break; | |
1877 case lir_sub: __ sub (lreg, rreg, res); break; | |
1878 case lir_mul: __ mult (lreg, rreg, res); break; | |
1879 default: ShouldNotReachHere(); | |
1880 } | |
1881 } | |
1882 } else { | |
1883 assert (right->is_constant(), "must be constant"); | |
1884 | |
1885 if (dest->is_single_cpu()) { | |
1886 Register lreg = left->as_register(); | |
1887 Register res = dest->as_register(); | |
1888 int simm13 = right->as_constant_ptr()->as_jint(); | |
1889 | |
1890 switch (code) { | |
1891 case lir_add: __ add (lreg, simm13, res); break; | |
1892 case lir_sub: __ sub (lreg, simm13, res); break; | |
1893 case lir_mul: __ mult (lreg, simm13, res); break; | |
1894 default: ShouldNotReachHere(); | |
1895 } | |
1896 } else { | |
1897 Register lreg = left->as_pointer_register(); | |
1898 Register res = dest->as_register_lo(); | |
1899 long con = right->as_constant_ptr()->as_jlong(); | |
1900 assert(Assembler::is_simm13(con), "must be simm13"); | |
1901 | |
1902 switch (code) { | |
1903 case lir_add: __ add (lreg, (int)con, res); break; | |
1904 case lir_sub: __ sub (lreg, (int)con, res); break; | |
1905 case lir_mul: __ mult (lreg, (int)con, res); break; | |
1906 default: ShouldNotReachHere(); | |
1907 } | |
1908 } | |
1909 } | |
1910 } | |
1911 | |
1912 | |
1913 void LIR_Assembler::fpop() { | |
1914 // do nothing | |
1915 } | |
1916 | |
1917 | |
1918 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { | |
1919 switch (code) { | |
1920 case lir_sin: | |
1921 case lir_tan: | |
1922 case lir_cos: { | |
1923 assert(thread->is_valid(), "preserve the thread object for performance reasons"); | |
1924 assert(dest->as_double_reg() == F0, "the result will be in f0/f1"); | |
1925 break; | |
1926 } | |
1927 case lir_sqrt: { | |
1928 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt"); | |
1929 FloatRegister src_reg = value->as_double_reg(); | |
1930 FloatRegister dst_reg = dest->as_double_reg(); | |
1931 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg); | |
1932 break; | |
1933 } | |
1934 case lir_abs: { | |
1935 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs"); | |
1936 FloatRegister src_reg = value->as_double_reg(); | |
1937 FloatRegister dst_reg = dest->as_double_reg(); | |
1938 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg); | |
1939 break; | |
1940 } | |
1941 default: { | |
1942 ShouldNotReachHere(); | |
1943 break; | |
1944 } | |
1945 } | |
1946 } | |
1947 | |
1948 | |
1949 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { | |
1950 if (right->is_constant()) { | |
1951 if (dest->is_single_cpu()) { | |
1952 int simm13 = right->as_constant_ptr()->as_jint(); | |
1953 switch (code) { | |
1954 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break; | |
1955 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break; | |
1956 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break; | |
1957 default: ShouldNotReachHere(); | |
1958 } | |
1959 } else { | |
1960 long c = right->as_constant_ptr()->as_jlong(); | |
1961 assert(c == (int)c && Assembler::is_simm13(c), "out of range"); | |
1962 int simm13 = (int)c; | |
1963 switch (code) { | |
1964 case lir_logic_and: | |
1965 #ifndef _LP64 | |
1966 __ and3 (left->as_register_hi(), 0, dest->as_register_hi()); | |
1967 #endif | |
1968 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo()); | |
1969 break; | |
1970 | |
1971 case lir_logic_or: | |
1972 #ifndef _LP64 | |
1973 __ or3 (left->as_register_hi(), 0, dest->as_register_hi()); | |
1974 #endif | |
1975 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo()); | |
1976 break; | |
1977 | |
1978 case lir_logic_xor: | |
1979 #ifndef _LP64 | |
1980 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi()); | |
1981 #endif | |
1982 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo()); | |
1983 break; | |
1984 | |
1985 default: ShouldNotReachHere(); | |
1986 } | |
1987 } | |
1988 } else { | |
1989 assert(right->is_register(), "right should be in register"); | |
1990 | |
1991 if (dest->is_single_cpu()) { | |
1992 switch (code) { | |
1993 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break; | |
1994 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break; | |
1995 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break; | |
1996 default: ShouldNotReachHere(); | |
1997 } | |
1998 } else { | |
1999 #ifdef _LP64 | |
2000 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : | |
2001 left->as_register_lo(); | |
2002 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() : | |
2003 right->as_register_lo(); | |
2004 | |
2005 switch (code) { | |
2006 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break; | |
2007 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break; | |
2008 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break; | |
2009 default: ShouldNotReachHere(); | |
2010 } | |
2011 #else | |
2012 switch (code) { | |
2013 case lir_logic_and: | |
2014 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); | |
2015 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); | |
2016 break; | |
2017 | |
2018 case lir_logic_or: | |
2019 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); | |
2020 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); | |
2021 break; | |
2022 | |
2023 case lir_logic_xor: | |
2024 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); | |
2025 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); | |
2026 break; | |
2027 | |
2028 default: ShouldNotReachHere(); | |
2029 } | |
2030 #endif | |
2031 } | |
2032 } | |
2033 } | |
2034 | |
2035 | |
2036 int LIR_Assembler::shift_amount(BasicType t) { | |
29 | 2037 int elem_size = type2aelembytes(t); |
0 | 2038 switch (elem_size) { |
2039 case 1 : return 0; | |
2040 case 2 : return 1; | |
2041 case 4 : return 2; | |
2042 case 8 : return 3; | |
2043 } | |
2044 ShouldNotReachHere(); | |
2045 return -1; | |
2046 } | |
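// Example: type2aelembytes(T_INT) == 4, so shift_amount(T_INT) == 2 and
// callers form an element offset as index << 2.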
2047 | |
2048 | |
2049 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) { | |
2050 assert(exceptionOop->as_register() == Oexception, "should match"); | |
2051 assert(unwind || exceptionPC->as_register() == Oissuing_pc, "should match"); | |
2052 | |
2053 info->add_register_oop(exceptionOop); | |
2054 | |
2055 if (unwind) { | |
2056 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type); | |
2057 __ delayed()->nop(); | |
2058 } else { | |
2059 // reuse the debug info from the safepoint poll for the throw op itself | |
2060 address pc_for_athrow = __ pc(); | |
2061 int pc_for_athrow_offset = __ offset(); | |
2062 RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); | |
727 | 2063 __ set(pc_for_athrow, Oissuing_pc, rspec); |
0 | 2064 add_call_info(pc_for_athrow_offset, info); // for exception handler |
2065 | |
2066 __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); | |
2067 __ delayed()->nop(); | |
2068 } | |
2069 } | |
2070 | |
2071 | |
2072 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { | |
2073 Register src = op->src()->as_register(); | |
2074 Register dst = op->dst()->as_register(); | |
2075 Register src_pos = op->src_pos()->as_register(); | |
2076 Register dst_pos = op->dst_pos()->as_register(); | |
2077 Register length = op->length()->as_register(); | |
2078 Register tmp = op->tmp()->as_register(); | |
2079 Register tmp2 = O7; | |
2080 | |
2081 int flags = op->flags(); | |
2082 ciArrayKlass* default_type = op->expected_type(); | |
2083 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; | |
2084 if (basic_type == T_ARRAY) basic_type = T_OBJECT; | |
2085 | |
2086 // set up the arraycopy stub information | |
2087 ArrayCopyStub* stub = op->stub(); | |
2088 | |
2089 // always do stub if no type information is available. it's ok if | |
2090 // the known type isn't loaded since the code sanity checks | |
2091 // in debug mode and the type isn't required when we know the exact type | |
2092 // also check that the type is an array type. | |
342 | 2093 // We also, for now, always call the stub if the barrier set requires a |
2094 // write_ref_pre barrier (which the stub does, but none of the optimized |
2095 // cases currently does). |
2096 if (op->expected_type() == NULL || |
2097 Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) { |
0 | 2098 __ mov(src, O0); |
2099 __ mov(src_pos, O1); | |
2100 __ mov(dst, O2); | |
2101 __ mov(dst_pos, O3); | |
2102 __ mov(length, O4); | |
2103 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy)); | |
2104 | |
2105 __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry()); | |
2106 __ delayed()->nop(); | |
2107 __ bind(*stub->continuation()); | |
2108 return; | |
2109 } | |
2110 | |
2111 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point"); | |
2112 | |
2113 // make sure src and dst are non-null and load array length | |
2114 if (flags & LIR_OpArrayCopy::src_null_check) { | |
2115 __ tst(src); | |
2116 __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); | |
2117 __ delayed()->nop(); | |
2118 } | |
2119 | |
2120 if (flags & LIR_OpArrayCopy::dst_null_check) { | |
2121 __ tst(dst); | |
2122 __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); | |
2123 __ delayed()->nop(); | |
2124 } | |
2125 | |
2126 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { | |
2127 // test src_pos register | |
2128 __ tst(src_pos); | |
2129 __ br(Assembler::less, false, Assembler::pn, *stub->entry()); | |
2130 __ delayed()->nop(); | |
2131 } | |
2132 | |
2133 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { | |
2134 // test dst_pos register | |
2135 __ tst(dst_pos); | |
2136 __ br(Assembler::less, false, Assembler::pn, *stub->entry()); | |
2137 __ delayed()->nop(); | |
2138 } | |
2139 | |
2140 if (flags & LIR_OpArrayCopy::length_positive_check) { | |
2141 // make sure length isn't negative | |
2142 __ tst(length); | |
2143 __ br(Assembler::less, false, Assembler::pn, *stub->entry()); | |
2144 __ delayed()->nop(); | |
2145 } | |
2146 | |
2147 if (flags & LIR_OpArrayCopy::src_range_check) { | |
2148 __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2); | |
2149 __ add(length, src_pos, tmp); | |
2150 __ cmp(tmp2, tmp); | |
2151 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry()); | |
2152 __ delayed()->nop(); | |
2153 } | |
2154 | |
2155 if (flags & LIR_OpArrayCopy::dst_range_check) { | |
2156 __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2); | |
2157 __ add(length, dst_pos, tmp); | |
2158 __ cmp(tmp2, tmp); | |
2159 __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry()); | |
2160 __ delayed()->nop(); | |
2161 } | |
2162 | |
2163 if (flags & LIR_OpArrayCopy::type_check) { | |
2164 __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp); | |
2165 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); | |
2166 __ cmp(tmp, tmp2); | |
2167 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); | |
2168 __ delayed()->nop(); | |
2169 } | |
2170 | |
2171 #ifdef ASSERT | |
2172 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { | |
2173 // Sanity check the known type with the incoming class. For the | |
2174 // primitive case the types must match exactly with src.klass and | |
2175 // dst.klass each exactly matching the default type. For the | |
2176 // object array case, if no type check is needed then either the | |
2177 // dst type is exactly the expected type and the src type is a | |
2178 // subtype which we can't check or src is the same array as dst | |
2179 // but not necessarily exactly of type default_type. | |
2180 Label known_ok, halt; | |
989 | 2181 jobject2reg(op->expected_type()->constant_encoding(), tmp);
0 | 2182 __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); |
2183 if (basic_type != T_OBJECT) { | |
2184 __ cmp(tmp, tmp2); | |
2185 __ br(Assembler::notEqual, false, Assembler::pn, halt); | |
2186 __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2); | |
2187 __ cmp(tmp, tmp2); | |
2188 __ br(Assembler::equal, false, Assembler::pn, known_ok); | |
2189 __ delayed()->nop(); | |
2190 } else { | |
2191 __ cmp(tmp, tmp2); | |
2192 __ br(Assembler::equal, false, Assembler::pn, known_ok); | |
2193 __ delayed()->cmp(src, dst); | |
2194 __ br(Assembler::equal, false, Assembler::pn, known_ok); | |
2195 __ delayed()->nop(); | |
2196 } | |
2197 __ bind(halt); | |
2198 __ stop("incorrect type information in arraycopy"); | |
2199 __ bind(known_ok); | |
2200 } | |
2201 #endif | |
2202 | |
2203 int shift = shift_amount(basic_type); | |
2204 | |
2205 Register src_ptr = O0; | |
2206 Register dst_ptr = O1; | |
2207 Register len = O2; | |
2208 | |
2209 __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr); | |
1060 | 2210 LP64_ONLY(__ sra(src_pos, 0, src_pos);) // upper 32 bits must be zero (sra by 0 sign-extends the non-negative int) |
0 | 2211 if (shift == 0) { |
2212 __ add(src_ptr, src_pos, src_ptr); | |
2213 } else { | |
2214 __ sll(src_pos, shift, tmp); | |
2215 __ add(src_ptr, tmp, src_ptr); | |
2216 } | |
2217 | |
2218 __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr); | |
1060 | 2219 LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) // upper 32 bits must be zero (sra by 0 sign-extends the non-negative int) |
0 | 2220 if (shift == 0) { |
2221 __ add(dst_ptr, dst_pos, dst_ptr); | |
2222 } else { | |
2223 __ sll(dst_pos, shift, tmp); | |
2224 __ add(dst_ptr, tmp, dst_ptr); | |
2225 } | |
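// At this point the raw element addresses are formed; e.g. for an int
// array, src_ptr == src + arrayOopDesc::base_offset_in_bytes(T_INT)
// + (src_pos << 2), and dst_ptr likewise.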
2226 | |
2227 if (basic_type != T_OBJECT) { | |
2228 if (shift == 0) { | |
2229 __ mov(length, len); | |
2230 } else { | |
2231 __ sll(length, shift, len); | |
2232 } | |
2233 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy)); | |
2234 } else { | |
2235 // oop_arraycopy takes a length in number of elements, so don't scale it. | |
2236 __ mov(length, len); | |
2237 __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy)); | |
2238 } | |
2239 | |
2240 __ bind(*stub->continuation()); | |
2241 } | |
2242 | |
2243 | |
2244 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { | |
2245 if (dest->is_single_cpu()) { | |
2246 #ifdef _LP64 | |
2247 if (left->type() == T_OBJECT) { | |
2248 switch (code) { | |
2249 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break; | |
2250 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break; | |
2251 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; | |
2252 default: ShouldNotReachHere(); | |
2253 } | |
2254 } else | |
2255 #endif | |
2256 switch (code) { | |
2257 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break; | |
2258 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break; | |
2259 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; | |
2260 default: ShouldNotReachHere(); | |
2261 } | |
2262 } else { | |
2263 #ifdef _LP64 | |
2264 switch (code) { | |
2265 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; | |
2266 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; | |
2267 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; | |
2268 default: ShouldNotReachHere(); | |
2269 } | |
2270 #else | |
2271 switch (code) { | |
2272 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; | |
2273 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; | |
2274 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; | |
2275 default: ShouldNotReachHere(); | |
2276 } | |
2277 #endif | |
2278 } | |
2279 } | |
2280 | |
2281 | |
2282 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { | |
2283 #ifdef _LP64 | |
2284 if (left->type() == T_OBJECT) { | |
2285 count = count & 63; // shouldn't shift by more than the bit width of intptr_t | |
2286 Register l = left->as_register(); | |
2287 Register d = dest->as_register_lo(); | |
2288 switch (code) { | |
2289 case lir_shl: __ sllx (l, count, d); break; | |
2290 case lir_shr: __ srax (l, count, d); break; | |
2291 case lir_ushr: __ srlx (l, count, d); break; | |
2292 default: ShouldNotReachHere(); | |
2293 } | |
2294 return; | |
2295 } | |
2296 #endif | |
2297 | |
2298 if (dest->is_single_cpu()) { | |
2299 count = count & 0x1F; // Java spec | |
2300 switch (code) { | |
2301 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break; | |
2302 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break; | |
2303 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break; | |
2304 default: ShouldNotReachHere(); | |
2305 } | |
2306 } else if (dest->is_double_cpu()) { | |
2307 count = count & 63; // Java spec | |
2308 switch (code) { | |
2309 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break; | |
2310 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break; | |
2311 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break; | |
2312 default: ShouldNotReachHere(); | |
2313 } | |
2314 } else { | |
2315 ShouldNotReachHere(); | |
2316 } | |
2317 } | |
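// The masking follows the Java language rules for shift distances: int
// shifts use only the low 5 bits of the count (so x << 33 == x << 1)
// and long shifts only the low 6 bits.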
2318 | |
2319 | |
2320 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { | |
2321 assert(op->tmp1()->as_register() == G1 && | |
2322 op->tmp2()->as_register() == G3 && | |
2323 op->tmp3()->as_register() == G4 && | |
2324 op->obj()->as_register() == O0 && | |
2325 op->klass()->as_register() == G5, "must be"); | |
2326 if (op->init_check()) { | |
2327 __ ld(op->klass()->as_register(), | |
2328 instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), | |
2329 op->tmp1()->as_register()); | |
2330 add_debug_info_for_null_check_here(op->stub()->info()); | |
2331 __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized); | |
2332 __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry()); | |
2333 __ delayed()->nop(); | |
2334 } | |
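// The init check loads the class's init_state and branches to the slow
// path unless the class is fully_initialized; the slow path goes to the
// runtime, where class initialization can complete (or be waited for)
// before the object is allocated.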
2335 __ allocate_object(op->obj()->as_register(), | |
2336 op->tmp1()->as_register(), | |
2337 op->tmp2()->as_register(), | |
2338 op->tmp3()->as_register(), | |
2339 op->header_size(), | |
2340 op->object_size(), | |
2341 op->klass()->as_register(), | |
2342 *op->stub()->entry()); | |
2343 __ bind(*op->stub()->continuation()); | |
2344 __ verify_oop(op->obj()->as_register()); | |
2345 } | |
2346 | |
2347 | |
2348 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { | |
2349 assert(op->tmp1()->as_register() == G1 && | |
2350 op->tmp2()->as_register() == G3 && | |
2351 op->tmp3()->as_register() == G4 && | |
2352 op->tmp4()->as_register() == O1 && | |
2353 op->klass()->as_register() == G5, "must be"); | |
2354 if (UseSlowPath || | |
2355 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || | |
2356 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { | |
2357 __ br(Assembler::always, false, Assembler::pn, *op->stub()->entry()); | |
2358 __ delayed()->nop(); | |
2359 } else { | |
2360 __ allocate_array(op->obj()->as_register(), | |
2361 op->len()->as_register(), | |
2362 op->tmp1()->as_register(), | |
2363 op->tmp2()->as_register(), | |
2364 op->tmp3()->as_register(), | |
2365 arrayOopDesc::header_size(op->type()), | |
29 | 2366 type2aelembytes(op->type()), |
0 | 2367 op->klass()->as_register(), |
2368 *op->stub()->entry()); | |
2369 } | |
2370 __ bind(*op->stub()->continuation()); | |
2371 } | |
2372 | |
2373 | |
2374 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { | |
2375 LIR_Code code = op->code(); | |
2376 if (code == lir_store_check) { | |
2377 Register value = op->object()->as_register(); | |
2378 Register array = op->array()->as_register(); | |
2379 Register k_RInfo = op->tmp1()->as_register(); | |
2380 Register klass_RInfo = op->tmp2()->as_register(); | |
2381 Register Rtmp1 = op->tmp3()->as_register(); | |
2382 | |
2383 __ verify_oop(value); | |
2384 | |
2385 CodeStub* stub = op->stub(); | |
2386 Label done; | |
2387 __ cmp(value, 0); | |
2388 __ br(Assembler::equal, false, Assembler::pn, done); | |
2389 __ delayed()->nop(); | |
2390 load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception()); | |
2391 load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL); | |
2392 | |
2393 // get instance klass | |
2394 load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL); | |
644 | 2395 // perform the fast part of the checking logic |
2396 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, &done, stub->entry(), NULL); |
2397 |
2398 // call out-of-line instance of __ check_klass_subtype_slow_path(...): |
2399 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); |
0 | 2400 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); |
2401 __ delayed()->nop(); | |
2402 __ cmp(G3, 0); | |
2403 __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); | |
2404 __ delayed()->nop(); | |
2405 __ bind(done); | |
2406 } else if (op->code() == lir_checkcast) { | |
2407 // we always need a stub for the failure case. | |
2408 CodeStub* stub = op->stub(); | |
2409 Register obj = op->object()->as_register(); | |
2410 Register k_RInfo = op->tmp1()->as_register(); | |
2411 Register klass_RInfo = op->tmp2()->as_register(); | |
2412 Register dst = op->result_opr()->as_register(); | |
2413 Register Rtmp1 = op->tmp3()->as_register(); | |
2414 ciKlass* k = op->klass(); | |
2415 | |
2416 if (obj == k_RInfo) { | |
2417 k_RInfo = klass_RInfo; | |
2418 klass_RInfo = obj; | |
2419 } | |
2420 if (op->profiled_method() != NULL) { | |
2421 ciMethod* method = op->profiled_method(); | |
2422 int bci = op->profiled_bci(); | |
2423 | |
2424 // We need two temporaries to perform this operation on SPARC, | |
2425 // so to keep things simple we perform a redundant test here | |
2426 Label profile_done; | |
2427 __ cmp(obj, 0); | |
2428 __ br(Assembler::notEqual, false, Assembler::pn, profile_done); | |
2429 __ delayed()->nop(); | |
2430 // Object is null; update methodDataOop | |
2431 ciMethodData* md = method->method_data(); | |
2432 if (md == NULL) { | |
2433 bailout("out of memory building methodDataOop"); | |
2434 return; | |
2435 } | |
2436 ciProfileData* data = md->bci_to_data(bci); | |
2437 assert(data != NULL, "need data for checkcast"); | |
2438 assert(data->is_BitData(), "need BitData for checkcast"); | |
2439 Register mdo = k_RInfo; | |
2440 Register data_val = Rtmp1; | |
989 | 2441 jobject2reg(md->constant_encoding(), mdo); |
0 | 2442 |
2443 int mdo_offset_bias = 0; | |
2444 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) { | |
2445 // The offset is large so bias the mdo by the base of the slot so | |
2446 // that the ld can use simm13s to reference the slots of the data | |
2447 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset()); | |
2448 __ set(mdo_offset_bias, data_val); | |
2449 __ add(mdo, data_val, mdo); | |
2450 } | |
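// Biasing example: if the slot's byte offset were 0x2000 it would not
// fit in a simm13 displacement (at most 4095), so mdo is advanced by
// the slot's base offset and later accesses use small displacements
// relative to the biased pointer.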
2451 | |
2452 | |
727 | 2453 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); |
0 | 2454 __ ldub(flags_addr, data_val); |
2455 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); | |
2456 __ stb(data_val, flags_addr); | |
2457 __ bind(profile_done); | |
2458 } | |
2459 | |
2460 Label done; | |
2461 // patching may screw with our temporaries on sparc, | |
2462 // so let's do it before loading the class | |
2463 if (k->is_loaded()) { | |
989 | 2464 jobject2reg(k->constant_encoding(), k_RInfo); |
0 | 2465 } else { |
2466 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); | |
2467 } | |
2468 assert(obj != k_RInfo, "must be different"); | |
2469 __ cmp(obj, 0); | |
2470 __ br(Assembler::equal, false, Assembler::pn, done); | |
2471 __ delayed()->nop(); | |
2472 | |
2473 // get object class | |
2474 // not a safepoint as obj null check happens earlier | |
2475 load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL); | |
2476 if (op->fast_check()) { | |
2477 assert_different_registers(klass_RInfo, k_RInfo); | |
2478 __ cmp(k_RInfo, klass_RInfo); | |
2479 __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); | |
2480 __ delayed()->nop(); | |
2481 __ bind(done); | |
2482 } else { | |
644 | 2483 bool need_slow_path = true; |
0 | 2484 if (k->is_loaded()) { |
644 | 2485 if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()) |
2486 need_slow_path = false; |
2487 // perform the fast part of the checking logic |
2488 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg, |
2489 (need_slow_path ? &done : NULL), |
2490 stub->entry(), NULL, |
665 | 2491 RegisterOrConstant(k->super_check_offset())); |
0 | 2492 } else { |
644 | 2493 // perform the fast part of the checking logic |
2494 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, |
2495 &done, stub->entry(), NULL); |
2496 } |
2497 if (need_slow_path) { |
2498 // call out-of-line instance of __ check_klass_subtype_slow_path(...): |
2499 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); |
0 | 2500 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); |
2501 __ delayed()->nop(); | |
2502 __ cmp(G3, 0); | |
2503 __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); | |
2504 __ delayed()->nop(); | |
2505 } | |
644 | 2506 __ bind(done); |
0 | 2507 } |
2508 __ mov(obj, dst); | |
2509 } else if (code == lir_instanceof) { | |
2510 Register obj = op->object()->as_register(); | |
2511 Register k_RInfo = op->tmp1()->as_register(); | |
2512 Register klass_RInfo = op->tmp2()->as_register(); | |
2513 Register dst = op->result_opr()->as_register(); | |
2514 Register Rtmp1 = op->tmp3()->as_register(); | |
2515 ciKlass* k = op->klass(); | |
2516 | |
2517 Label done; | |
2518 if (obj == k_RInfo) { | |
2519 k_RInfo = klass_RInfo; | |
2520 klass_RInfo = obj; | |
2521 } | |
2522 // patching may screw with our temporaries on sparc, | |
2523 // so let's do it before loading the class | |
2524 if (k->is_loaded()) { | |
989 | 2525 jobject2reg(k->constant_encoding(), k_RInfo); |
0 | 2526 } else { |
2527 jobject2reg_with_patching(k_RInfo, op->info_for_patch()); | |
2528 } | |
2529 assert(obj != k_RInfo, "must be different"); | |
2530 __ cmp(obj, 0); | |
2531 __ br(Assembler::equal, true, Assembler::pn, done); | |
2532 __ delayed()->set(0, dst); | |
2533 | |
2534 // get object class | |
2535 // not a safepoint as obj null check happens earlier | |
2536 load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL); | |
2537 if (op->fast_check()) { | |
2538 __ cmp(k_RInfo, klass_RInfo); | |
2539 __ br(Assembler::equal, true, Assembler::pt, done); | |
2540 __ delayed()->set(1, dst); | |
2541 __ set(0, dst); | |
2542 __ bind(done); | |
2543 } else { | |
644 | 2544 bool need_slow_path = true; |
0 | 2545 if (k->is_loaded()) { |
644 | 2546 if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()) |
2547 need_slow_path = false; |
2548 // perform the fast part of the checking logic |
2549 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, noreg, |
2550 (need_slow_path ? &done : NULL), |
2551 (need_slow_path ? &done : NULL), NULL, |
665 | 2552 RegisterOrConstant(k->super_check_offset()), |
644 | 2553 dst); |
0 | 2554 } else { |
2555 assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers"); | |
644 | 2556 // perform the fast part of the checking logic |
2557 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, dst, |
2558 &done, &done, NULL, |
665 | 2559 RegisterOrConstant(-1), |
644 | 2560 dst); |
2561 } |
2562 if (need_slow_path) { |
2563 // call out-of-line instance of __ check_klass_subtype_slow_path(...): |
2564 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); |
0 | 2565 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); |
2566 __ delayed()->nop(); | |
2567 __ mov(G3, dst); | |
2568 } | |
644 | 2569 __ bind(done); |
0 | 2570 } |
2571 } else { | |
2572 ShouldNotReachHere(); | |
2573 } | |
2574 | |
2575 } | |
2576 | |
2577 | |
2578 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { | |
2579 if (op->code() == lir_cas_long) { | |
2580 assert(VM_Version::supports_cx8(), "wrong machine"); | |
2581 Register addr = op->addr()->as_pointer_register(); | |
2582 Register cmp_value_lo = op->cmp_value()->as_register_lo(); | |
2583 Register cmp_value_hi = op->cmp_value()->as_register_hi(); | |
2584 Register new_value_lo = op->new_value()->as_register_lo(); | |
2585 Register new_value_hi = op->new_value()->as_register_hi(); | |
2586 Register t1 = op->tmp1()->as_register(); | |
2587 Register t2 = op->tmp2()->as_register(); | |
2588 #ifdef _LP64 | |
2589 __ mov(cmp_value_lo, t1); | |
2590 __ mov(new_value_lo, t2); | |
2591 #else | |
2592 // move high and low halves of long values into single registers | |
2593 __ sllx(cmp_value_hi, 32, t1); // shift high half into temp reg | |
2594 __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half | |
2595 __ or3(t1, cmp_value_lo, t1); // t1 holds 64-bit compare value | |
2596 __ sllx(new_value_hi, 32, t2); | |
2597 __ srl(new_value_lo, 0, new_value_lo); | |
2598 __ or3(t2, new_value_lo, t2); // t2 holds 64-bit value to swap | |
2599 #endif | |
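// Packing example for the 32-bit case: hi == 0x00000001, lo == 0x80000000
// combine to t1 == 0x0000000180000000; the srl by 0 matters because the
// low half may carry a sign extension in the upper 32 bits of its
// 64-bit register.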
2600 // perform the compare and swap operation | |
2601 __ casx(addr, t1, t2); | |
2602 // generate condition code - if the swap succeeded, t2 ("new value" reg) was | |
2603 // overwritten with the original value in "addr" and will be equal to t1. | |
2604 __ cmp(t1, t2); | |
2605 | |
2606 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { | |
2607 Register addr = op->addr()->as_pointer_register(); | |
2608 Register cmp_value = op->cmp_value()->as_register(); | |
2609 Register new_value = op->new_value()->as_register(); | |
2610 Register t1 = op->tmp1()->as_register(); | |
2611 Register t2 = op->tmp2()->as_register(); | |
2612 __ mov(cmp_value, t1); | |
2613 __ mov(new_value, t2); | |
2614 #ifdef _LP64 | |
2615 if (op->code() == lir_cas_obj) { | |
2616 __ casx(addr, t1, t2); | |
2617 } else | |
2618 #endif | |
2619 { | |
2620 __ cas(addr, t1, t2); | |
2621 } | |
2622 __ cmp(t1, t2); | |
2623 } else { | |
2624 Unimplemented(); | |
2625 } | |
2626 } | |
2627 | |
2628 void LIR_Assembler::set_24bit_FPU() { | |
2629 Unimplemented(); | |
2630 } | |
2631 | |
2632 | |
2633 void LIR_Assembler::reset_FPU() { | |
2634 Unimplemented(); | |
2635 } | |
2636 | |
2637 | |
2638 void LIR_Assembler::breakpoint() { | |
2639 __ breakpoint_trap(); | |
2640 } | |
2641 | |
2642 | |
2643 void LIR_Assembler::push(LIR_Opr opr) { | |
2644 Unimplemented(); | |
2645 } | |
2646 | |
2647 | |
2648 void LIR_Assembler::pop(LIR_Opr opr) { | |
2649 Unimplemented(); | |
2650 } | |
2651 | |
2652 | |
2653 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { | |
2654 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); | |
2655 Register dst = dst_opr->as_register(); | |
2656 Register reg = mon_addr.base(); | |
2657 int offset = mon_addr.disp(); | |
2658 // compute pointer to BasicLock | |
2659 if (mon_addr.is_simm13()) { | |
2660 __ add(reg, offset, dst); | |
2661 } else { | |
2662 __ set(offset, dst); | |
2663 __ add(dst, reg, dst); | |
2664 } | |
2665 } | |
2666 | |
2667 | |
2668 void LIR_Assembler::emit_lock(LIR_OpLock* op) { | |
2669 Register obj = op->obj_opr()->as_register(); | |
2670 Register hdr = op->hdr_opr()->as_register(); | |
2671 Register lock = op->lock_opr()->as_register(); | |
2672 | |
2673 // obj may not be an oop | |
2674 if (op->code() == lir_lock) { | |
2675 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub(); | |
2676 if (UseFastLocking) { | |
2677 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); | |
2678 // add debug info for NullPointerException only if one is possible | |
2679 if (op->info() != NULL) { | |
2680 add_debug_info_for_null_check_here(op->info()); | |
2681 } | |
2682 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry()); | |
2683 } else { | |
2684 // always do slow locking | |
2685 // note: the slow locking code could be inlined here, however if we use | |
2686 // slow locking, speed doesn't matter anyway and this solution is | |
2687 // simpler and requires less duplicated code - additionally, the | |
2688 // slow locking code is the same in either case which simplifies | |
2689 // debugging | |
2690 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); | |
2691 __ delayed()->nop(); | |
2692 } | |
2693 } else { | |
2694 assert(op->code() == lir_unlock, "Invalid code, expected lir_unlock"); | |
2695 if (UseFastLocking) { | |
2696 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); | |
2697 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); | |
2698 } else { | |
2699 // always do slow unlocking | |
2700 // note: the slow unlocking code could be inlined here, however if we use | |
2701 // slow unlocking, speed doesn't matter anyway and this solution is | |
2702 // simpler and requires less duplicated code - additionally, the | |
2703 // slow unlocking code is the same in either case which simplifies | |
2704 // debugging | |
2705 __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry()); | |
2706 __ delayed()->nop(); | |
2707 } | |
2708 } | |
2709 __ bind(*op->stub()->continuation()); | |
2710 } | |
2711 | |
2712 | |
2713 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { | |
2714 ciMethod* method = op->profiled_method(); | |
2715 int bci = op->profiled_bci(); | |
2716 | |
2717 // Update counter for all call types | |
2718 ciMethodData* md = method->method_data(); | |
2719 if (md == NULL) { | |
2720 bailout("out of memory building methodDataOop"); | |
2721 return; | |
2722 } | |
2723 ciProfileData* data = md->bci_to_data(bci); | |
2724 assert(data->is_CounterData(), "need CounterData for calls"); | |
2725 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); | |
2726 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated"); | |
2727 Register mdo = op->mdo()->as_register(); | |
2728 Register tmp1 = op->tmp1()->as_register(); | |
989 | 2729 jobject2reg(md->constant_encoding(), mdo); |
0 | 2730 int mdo_offset_bias = 0; |
2731 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) + | |
2732 data->size_in_bytes())) { | |
2733 // The offset is large so bias the mdo by the base of the slot so | |
2734 // that the ld can use simm13s to reference the slots of the data | |
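// (e.g. a slot offset of 0x2000 -- an illustrative value -- exceeds the
//  simm13 range, so it is folded into mdo once here and the loads/stores
//  below can then use small displacements)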
2735 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); | |
2736 __ set(mdo_offset_bias, O7); | |
2737 __ add(mdo, O7, mdo); | |
2738 } | |
2739 | |
727 | 2740 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); |
0 | 2741 Bytecodes::Code bc = method->java_code_at_bci(bci); |
2742 // Perform additional virtual call profiling for invokevirtual and | |
2743 // invokeinterface bytecodes | |
2744 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && | |
2745 Tier1ProfileVirtualCalls) { | |
2746 assert(op->recv()->is_single_cpu(), "recv must be allocated"); | |
2747 Register recv = op->recv()->as_register(); | |
2748 assert_different_registers(mdo, tmp1, recv); | |
2749 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); | |
2750 ciKlass* known_klass = op->known_holder(); | |
2751 if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) { | |
2752 // We know the type that will be seen at this call site; we can | |
2753 // statically update the methodDataOop rather than needing to do | |
2754 // dynamic tests on the receiver type | |
2755 | |
2756 // NOTE: we should probably put a lock around this search to | |
2757 // avoid collisions by concurrent compilations | |
2758 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; | |
2759 uint i; | |
2760 for (i = 0; i < VirtualCallData::row_limit(); i++) { | |
2761 ciKlass* receiver = vc_data->receiver(i); | |
2762 if (known_klass->equals(receiver)) { | |
727 | 2763 Address data_addr(mdo, md->byte_offset_of_slot(data, |
2764 VirtualCallData::receiver_count_offset(i)) - | |
0 | 2765 mdo_offset_bias); |
2766 __ lduw(data_addr, tmp1); | |
2767 __ add(tmp1, DataLayout::counter_increment, tmp1); | |
2768 __ stw(tmp1, data_addr); | |
2769 return; | |
2770 } | |
2771 } | |
2772 | |
2773 // Receiver type not found in profile data; select an empty slot | |
2774 | |
2775 // Note that this is less efficient than it could be: it | |
2776 // writes to the receiver slot of the VirtualCallData on | |
2777 // every execution rather than just the first time | |
2778 for (i = 0; i < VirtualCallData::row_limit(); i++) { | |
2779 ciKlass* receiver = vc_data->receiver(i); | |
2780 if (receiver == NULL) { | |
727 | 2781 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - |
0 | 2782 mdo_offset_bias); |
989 | 2783 jobject2reg(known_klass->constant_encoding(), tmp1); |
0 | 2784 __ st_ptr(tmp1, recv_addr); |
727 | 2785 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - |
0 | 2786 mdo_offset_bias); |
2787 __ lduw(data_addr, tmp1); | |
2788 __ add(tmp1, DataLayout::counter_increment, tmp1); | |
2789 __ stw(tmp1, data_addr); | |
2790 return; | |
2791 } | |
2792 } | |
2793 } else { | |
727 | 2794 load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT); |
0 | 2795 Label update_done; |
2796 uint i; | |
2797 for (i = 0; i < VirtualCallData::row_limit(); i++) { | |
2798 Label next_test; | |
2799 // See if the receiver is receiver[n]. | |
727 | 2800 Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - |
0 | 2801 mdo_offset_bias); |
2802 __ ld_ptr(receiver_addr, tmp1); | |
2803 __ verify_oop(tmp1); | |
2804 __ cmp(recv, tmp1); | |
2805 __ brx(Assembler::notEqual, false, Assembler::pt, next_test); | |
2806 __ delayed()->nop(); | |
727 | 2807 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - |
0 | 2808 mdo_offset_bias); |
2809 __ lduw(data_addr, tmp1); | |
2810 __ add(tmp1, DataLayout::counter_increment, tmp1); | |
2811 __ stw(tmp1, data_addr); | |
2812 __ br(Assembler::always, false, Assembler::pt, update_done); | |
2813 __ delayed()->nop(); | |
2814 __ bind(next_test); | |
2815 } | |
2816 | |
2817 // Didn't find receiver; find next empty slot and fill it in | |
2818 for (i = 0; i < VirtualCallData::row_limit(); i++) { | |
2819 Label next_test; | |
727 | 2820 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - |
0 | 2821 mdo_offset_bias); |
2822 load(recv_addr, tmp1, T_OBJECT); | |
2823 __ tst(tmp1); | |
2824 __ brx(Assembler::notEqual, false, Assembler::pt, next_test); | |
2825 __ delayed()->nop(); | |
2826 __ st_ptr(recv, recv_addr); | |
2827 __ set(DataLayout::counter_increment, tmp1); | |
727 | 2828 __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - |
2829 mdo_offset_bias); | |
1251 | 2830 __ br(Assembler::always, false, Assembler::pt, update_done); |
2831 __ delayed()->nop(); | |
0 | 2832 __ bind(next_test); |
2833 } | |
1251 | 2834 // Receiver did not match any saved receiver and there is no empty row for it. |
2835 // Increment total counter to indicate polymorphic case. | |
2836 __ lduw(counter_addr, tmp1); | |
2837 __ add(tmp1, DataLayout::counter_increment, tmp1); | |
2838 __ stw(tmp1, counter_addr); | |
0 | 2839 |
2840 __ bind(update_done); | |
2841 } | |
1251 | 2842 } else { |
2843 // Static call | |
2844 __ lduw(counter_addr, tmp1); | |
2845 __ add(tmp1, DataLayout::counter_increment, tmp1); | |
2846 __ stw(tmp1, counter_addr); | |
0 | 2847 } |
2848 } | |
2849 | |
2850 | |
2851 void LIR_Assembler::align_backward_branch_target() { | |
1365 | 2852 __ align(OptoLoopAlignment); |
0 | 2853 } |
2854 | |
2855 | |
2856 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { | |
2857 // make sure we are expecting a delay | |
2858 // this has the side effect of clearing the delay state | |
2859 // so we can use _masm instead of _masm->delayed() to do the | |
2860 // code generation. | |
2861 __ delayed(); | |
2862 | |
2863 // make sure we only emit one instruction | |
2864 int offset = code_offset(); | |
2865 op->delay_op()->emit_code(this); | |
2866 #ifdef ASSERT | |
2867 if (code_offset() - offset != NativeInstruction::nop_instruction_size) { | |
2868 op->delay_op()->print(); | |
2869 } | |
2870 assert(code_offset() - offset == NativeInstruction::nop_instruction_size, | |
2871 "only one instruction can go in a delay slot"); | |
2872 #endif | |
2873 | |
2874 // we may also be emitting the call info for the instruction | |
2875 // whose delay slot we occupy. | |
2876 CodeEmitInfo * call_info = op->call_info(); | |
2877 if (call_info) { | |
2878 add_call_info(code_offset(), call_info); | |
2879 } | |
2880 | |
2881 if (VerifyStackAtCalls) { | |
2882 _masm->sub(FP, SP, O7); | |
2883 _masm->cmp(O7, initial_frame_size_in_bytes()); | |
2884 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2); | |
2885 } | |
2886 } | |
2887 | |
2888 | |
2889 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { | |
2890 assert(left->is_register(), "can only handle registers"); | |
2891 | |
2892 if (left->is_single_cpu()) { | |
2893 __ neg(left->as_register(), dest->as_register()); | |
2894 } else if (left->is_single_fpu()) { | |
2895 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg()); | |
2896 } else if (left->is_double_fpu()) { | |
2897 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg()); | |
2898 } else { | |
2899 assert(left->is_double_cpu(), "Must be a long"); | |
2900 Register Rlow = left->as_register_lo(); | |
2901 Register Rhi = left->as_register_hi(); | |
2902 #ifdef _LP64 | |
2903 __ sub(G0, Rlow, dest->as_register_lo()); | |
2904 #else | |
2905 __ subcc(G0, Rlow, dest->as_register_lo()); | |
2906 __ subc (G0, Rhi, dest->as_register_hi()); | |
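// (two-word negate: 0 - Rlow sets the borrow, which subc consumes
//  when computing the high word, giving -x across the register pair)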
2907 #endif | |
2908 } | |
2909 } | |
2910 | |
2911 | |
2912 void LIR_Assembler::fxch(int i) { | |
2913 Unimplemented(); | |
2914 } | |
2915 | |
2916 void LIR_Assembler::fld(int i) { | |
2917 Unimplemented(); | |
2918 } | |
2919 | |
2920 void LIR_Assembler::ffree(int i) { | |
2921 Unimplemented(); | |
2922 } | |
2923 | |
2924 void LIR_Assembler::rt_call(LIR_Opr result, address dest, | |
2925 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { | |
2926 | |
2927 // if tmp is invalid, then the function being called doesn't destroy the thread | |
2928 if (tmp->is_valid()) { | |
2929 __ save_thread(tmp->as_register()); | |
2930 } | |
2931 __ call(dest, relocInfo::runtime_call_type); | |
2932 __ delayed()->nop(); | |
2933 if (info != NULL) { | |
2934 add_call_info_here(info); | |
2935 } | |
2936 if (tmp->is_valid()) { | |
2937 __ restore_thread(tmp->as_register()); | |
2938 } | |
2939 | |
2940 #ifdef ASSERT | |
2941 __ verify_thread(); | |
2942 #endif // ASSERT | |
2943 } | |
2944 | |
2945 | |
2946 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { | |
2947 #ifdef _LP64 | |
2948 ShouldNotReachHere(); | |
2949 #endif | |
2950 | |
2951 NEEDS_CLEANUP; | |
2952 if (type == T_LONG) { | |
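// On 32-bit SPARC a Java long occupies two registers, so a volatile
// long access has to pack/unpack the halves and touch memory with a
// single 64-bit-wide instruction (stx/ldx on V9, std/ldd otherwise)
// to remain atomic.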
2953 LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr(); | |
2954 | |
2955 // (extended to allow indexed as well as constant-displacement addressing for JSR-166) | |
2956 Register idx = noreg; // contains either constant offset or index | |
2957 | |
2958 int disp = mem_addr->disp(); | |
2959 if (mem_addr->index() == LIR_OprFact::illegalOpr) { | |
2960 if (!Assembler::is_simm13(disp)) { | |
2961 idx = O7; | |
2962 __ set(disp, idx); | |
2963 } | |
2964 } else { | |
2965 assert(disp == 0, "not both indexed and disp"); | |
2966 idx = mem_addr->index()->as_register(); | |
2967 } | |
2968 | |
2969 int null_check_offset = -1; | |
2970 | |
2971 Register base = mem_addr->base()->as_register(); | |
2972 if (src->is_register() && dest->is_address()) { | |
2973 // G4 is high half, G5 is low half | |
2974 if (VM_Version::v9_instructions_work()) { | |
2975 // clear the top bits of G5, and scale up G4 | |
2976 __ srl (src->as_register_lo(), 0, G5); | |
2977 __ sllx(src->as_register_hi(), 32, G4); | |
2978 // combine the two halves into the 64 bits of G4 | |
2979 __ or3(G4, G5, G4); | |
2980 null_check_offset = __ offset(); | |
2981 if (idx == noreg) { | |
2982 __ stx(G4, base, disp); | |
2983 } else { | |
2984 __ stx(G4, base, idx); | |
2985 } | |
2986 } else { | |
2987 __ mov (src->as_register_hi(), G4); | |
2988 __ mov (src->as_register_lo(), G5); | |
2989 null_check_offset = __ offset(); | |
2990 if (idx == noreg) { | |
2991 __ std(G4, base, disp); | |
2992 } else { | |
2993 __ std(G4, base, idx); | |
2994 } | |
2995 } | |
2996 } else if (src->is_address() && dest->is_register()) { | |
2997 null_check_offset = __ offset(); | |
2998 if (VM_Version::v9_instructions_work()) { | |
2999 if (idx == noreg) { | |
3000 __ ldx(base, disp, G5); | |
3001 } else { | |
3002 __ ldx(base, idx, G5); | |
3003 } | |
3004 __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi | |
3005 __ mov (G5, dest->as_register_lo()); // copy low half into lo | |
3006 } else { | |
3007 if (idx == noreg) { | |
3008 __ ldd(base, disp, G4); | |
3009 } else { | |
3010 __ ldd(base, idx, G4); | |
3011 } | |
3012 // G4 is high half, G5 is low half | |
3013 __ mov (G4, dest->as_register_hi()); | |
3014 __ mov (G5, dest->as_register_lo()); | |
3015 } | |
3016 } else { | |
3017 Unimplemented(); | |
3018 } | |
3019 if (info != NULL) { | |
3020 add_debug_info_for_null_check(null_check_offset, info); | |
3021 } | |
3022 | |
3023 } else { | |
3024 // use normal move for all other volatiles since they don't need | |
3025 // special handling to remain atomic. | |
3026 move_op(src, dest, type, lir_patch_none, info, false, false); | |
3027 } | |
3028 } | |
3029 | |
3030 void LIR_Assembler::membar() { | |
3031 // only StoreLoad membars ever need to be emitted explicitly on SPARC in TSO mode | |
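// (TSO already orders load-load, load-store and store-store; a store
//  followed by a load of a different location is the only reordering
//  the hardware may perform, hence the single barrier kind)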
3032 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) ); | |
3033 } | |
3034 | |
3035 void LIR_Assembler::membar_acquire() { | |
3036 // no-op on TSO | |
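// (acquire = LoadLoad|LoadStore ordering, which TSO already guarantees)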
3037 } | |
3038 | |
3039 void LIR_Assembler::membar_release() { | |
3040 // no-op on TSO | |
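// (release = LoadStore|StoreStore ordering, which TSO already guarantees)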
3041 } | |
3042 | |
3043 // Macro to pack two sequential registers containing 32-bit values | |
3044 // into a single 64-bit register. | |
3045 // rs and rs->successor() are packed into rd | |
3046 // rd and rs may be the same register. | |
3047 // Note: rs and rs->successor() are destroyed. | |
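// Worked example (illustrative values): with rs holding 0x00000000AAAAAAAA
// (high half) and rs->successor() holding 0x00000000BBBBBBBB (low half),
// rd ends up holding 0xAAAAAAAABBBBBBBB.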
3048 void LIR_Assembler::pack64( Register rs, Register rd ) { | |
3049 __ sllx(rs, 32, rs); | |
3050 __ srl(rs->successor(), 0, rs->successor()); | |
3051 __ or3(rs, rs->successor(), rd); | |
3052 } | |
3053 | |
3054 // Macro to unpack a 64-bit value in a register into | |
3055 // two sequential registers. | |
3056 // rd is unpacked into rd and rd->successor() | |
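// Worked example (illustrative values): rd = 0xAAAAAAAABBBBBBBB unpacks
// to rd = sign_ext(0xAAAAAAAA) and rd->successor() = sign_ext(0xBBBBBBBB).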
3057 void LIR_Assembler::unpack64( Register rd ) { | |
3058 __ mov(rd, rd->successor()); | |
3059 __ srax(rd, 32, rd); | |
3060 __ sra(rd->successor(), 0, rd->successor()); | |
3061 } | |
3062 | |
3063 | |
3064 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { | |
3065 LIR_Address* addr = addr_opr->as_address_ptr(); | |
3066 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet"); | |
3067 __ add(addr->base()->as_register(), addr->disp(), dest->as_register()); | |
3068 } | |
3069 | |
3070 | |
3071 void LIR_Assembler::get_thread(LIR_Opr result_reg) { | |
3072 assert(result_reg->is_register(), "check"); | |
3073 __ mov(G2_thread, result_reg->as_register()); | |
3074 } | |
3075 | |
3076 | |
3077 void LIR_Assembler::peephole(LIR_List* lir) { | |
3078 LIR_OpList* inst = lir->instructions_list(); | |
3079 for (int i = 0; i < inst->length(); i++) { | |
3080 LIR_Op* op = inst->at(i); | |
3081 switch (op->code()) { | |
3082 case lir_cond_float_branch: | |
3083 case lir_branch: { | |
3084 LIR_OpBranch* branch = op->as_OpBranch(); | |
3085 assert(branch->info() == NULL, "shouldn't be state on branches anymore"); | |
3086 LIR_Op* delay_op = NULL; | |
3087 // we'd like to be able to pull following instructions into | |
3088 // this slot, but we don't know enough to do that safely yet, so | |
3089 // we only optimize block-to-block control flow. | |
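// the rewrite performed below, schematically:
//   [i-1] move   [i] branch   ==>   [i-1] branch   [i] delay(move)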
3090 if (LIRFillDelaySlots && branch->block()) { | |
3091 LIR_Op* prev = inst->at(i - 1); | |
3092 if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) { | |
3093 // swap previous instruction into delay slot | |
3094 inst->at_put(i - 1, op); | |
3095 inst->at_put(i, new LIR_OpDelay(prev, op->info())); | |
3096 #ifndef PRODUCT | |
3097 if (LIRTracePeephole) { | |
3098 tty->print_cr("delayed"); | |
3099 inst->at(i - 1)->print(); | |
3100 inst->at(i)->print(); | |
3101 } | |
3102 #endif | |
3103 continue; | |
3104 } | |
3105 } | |
3106 | |
3107 if (!delay_op) { | |
3108 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL); | |
3109 } | |
3110 inst->insert_before(i + 1, delay_op); | |
3111 break; | |
3112 } | |
3113 case lir_static_call: | |
3114 case lir_virtual_call: | |
3115 case lir_icvirtual_call: | |
3116 case lir_optvirtual_call: { | |
3117 LIR_Op* delay_op = NULL; | |
3118 LIR_Op* prev = inst->at(i - 1); | |
3119 if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL && | |
3120 (op->code() != lir_virtual_call || | |
3121 !prev->result_opr()->is_single_cpu() || | |
3122 prev->result_opr()->as_register() != O0) && | |
3123 LIR_Assembler::is_single_instruction(prev)) { | |
3124 // Only moves without info can be put into the delay slot. | |
3125 // Also don't allow the setup of the receiver in the delay | |
3126 // slot for vtable calls. | |
3127 inst->at_put(i - 1, op); | |
3128 inst->at_put(i, new LIR_OpDelay(prev, op->info())); | |
3129 #ifndef PRODUCT | |
3130 if (LIRTracePeephole) { | |
3131 tty->print_cr("delayed"); | |
3132 inst->at(i - 1)->print(); | |
3133 inst->at(i)->print(); | |
3134 } | |
3135 #endif | |
3136 continue; | |
3137 } | |
3138 | |
3139 if (!delay_op) { | |
3140 delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info()); | |
3141 inst->insert_before(i + 1, delay_op); | |
3142 } | |
3143 break; | |
3144 } | |
3145 } | |
3146 } | |
3147 } | |
3148 | |
3149 | |
3150 | |
3151 | |
3152 #undef __ |