Mercurial > hg > graal-compiler
annotate src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp @ 6812:988bf00cc564
7200261: G1: Liveness counting inconsistencies during marking verification
Summary: The clipping code in the routine that sets the bits for a range of cards, in the liveness accounting verification code was incorrect. It set all the bits in the card bitmap from the given starting index which would lead to spurious marking verification failures.
Reviewed-by: brutisso, jwilhelm, jmasa
author | johnc |
---|---|
date | Thu, 27 Sep 2012 15:44:01 -0700 |
parents | 8a02ca5e5576 |
children | 7eca5de9e0b6 |
rev | line source |
---|---|
0 | 1 /* |
6057 | 2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1378
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1378
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1378
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "c1/c1_Compilation.hpp" | |
27 #include "c1/c1_LIRAssembler.hpp" | |
28 #include "c1/c1_MacroAssembler.hpp" | |
29 #include "c1/c1_Runtime1.hpp" | |
30 #include "c1/c1_ValueStack.hpp" | |
31 #include "ci/ciArrayKlass.hpp" | |
32 #include "ci/ciInstance.hpp" | |
33 #include "gc_interface/collectedHeap.hpp" | |
34 #include "memory/barrierSet.hpp" | |
35 #include "memory/cardTableModRefBS.hpp" | |
36 #include "nativeInst_sparc.hpp" | |
37 #include "oops/objArrayKlass.hpp" | |
38 #include "runtime/sharedRuntime.hpp" | |
0 | 39 |
40 #define __ _masm-> | |
41 | |
42 | |
43 //------------------------------------------------------------ | |
44 | |
45 | |
46 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { | |
47 if (opr->is_constant()) { | |
48 LIR_Const* constant = opr->as_constant_ptr(); | |
49 switch (constant->type()) { | |
50 case T_INT: { | |
51 jint value = constant->as_jint(); | |
52 return Assembler::is_simm13(value); | |
53 } | |
54 | |
55 default: | |
56 return false; | |
57 } | |
58 } | |
59 return false; | |
60 } | |
61 | |
62 | |
63 bool LIR_Assembler::is_single_instruction(LIR_Op* op) { | |
64 switch (op->code()) { | |
65 case lir_null_check: | |
66 return true; | |
67 | |
68 | |
69 case lir_add: | |
70 case lir_ushr: | |
71 case lir_shr: | |
72 case lir_shl: | |
73 // integer shifts and adds are always one instruction | |
74 return op->result_opr()->is_single_cpu(); | |
75 | |
76 | |
77 case lir_move: { | |
78 LIR_Op1* op1 = op->as_Op1(); | |
79 LIR_Opr src = op1->in_opr(); | |
80 LIR_Opr dst = op1->result_opr(); | |
81 | |
82 if (src == dst) { | |
83 NEEDS_CLEANUP; | |
84 // this works around a problem where moves with the same src and dst | |
85 // end up in the delay slot and then the assembler swallows the mov | |
86 // since it has no effect and then it complains because the delay slot | |
87 // is empty. returning false stops the optimizer from putting this in | |
88 // the delay slot | |
89 return false; | |
90 } | |
91 | |
92 // don't put moves involving oops into the delay slot since the VerifyOops code | |
93 // will make it much larger than a single instruction. | |
94 if (VerifyOops) { | |
95 return false; | |
96 } | |
97 | |
98 if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none || | |
99 ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) { | |
100 return false; | |
101 } | |
102 | |
2002 | 103 if (UseCompressedOops) { |
104 if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false; | |
105 if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false; | |
106 } | |
107 | |
0 | 108 if (dst->is_register()) { |
109 if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) { | |
110 return !PatchALot; | |
111 } else if (src->is_single_stack()) { | |
112 return true; | |
113 } | |
114 } | |
115 | |
116 if (src->is_register()) { | |
117 if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) { | |
118 return !PatchALot; | |
119 } else if (dst->is_single_stack()) { | |
120 return true; | |
121 } | |
122 } | |
123 | |
124 if (dst->is_register() && | |
125 ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) || | |
126 (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) { | |
127 return true; | |
128 } | |
129 | |
130 return false; | |
131 } | |
132 | |
133 default: | |
134 return false; | |
135 } | |
136 ShouldNotReachHere(); | |
137 } | |
138 | |
139 | |
140 LIR_Opr LIR_Assembler::receiverOpr() { | |
141 return FrameMap::O0_oop_opr; | |
142 } | |
143 | |
144 | |
145 LIR_Opr LIR_Assembler::osrBufferPointer() { | |
146 return FrameMap::I0_opr; | |
147 } | |
148 | |
149 | |
150 int LIR_Assembler::initial_frame_size_in_bytes() { | |
151 return in_bytes(frame_map()->framesize_in_bytes()); | |
152 } | |
153 | |
154 | |
155 // inline cache check: the inline cached class is in G5_inline_cache_reg(G5); | |
156 // we fetch the class of the receiver (O0) and compare it with the cached class. | |
157 // If they do not match we jump to slow case. | |
158 int LIR_Assembler::check_icache() { | |
159 int offset = __ offset(); | |
160 __ inline_cache_check(O0, G5_inline_cache_reg); | |
161 return offset; | |
162 } | |
163 | |
164 | |
165 void LIR_Assembler::osr_entry() { | |
166 // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp): | |
167 // | |
168 // 1. Create a new compiled activation. | |
169 // 2. Initialize local variables in the compiled activation. The expression stack must be empty | |
170 // at the osr_bci; it is not initialized. | |
171 // 3. Jump to the continuation address in compiled code to resume execution. | |
172 | |
173 // OSR entry point | |
174 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); | |
175 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); | |
176 ValueStack* entry_state = osr_entry->end()->state(); | |
177 int number_of_locks = entry_state->locks_size(); | |
178 | |
179 // Create a frame for the compiled activation. | |
180 __ build_frame(initial_frame_size_in_bytes()); | |
181 | |
182 // OSR buffer is | |
183 // | |
184 // locals[nlocals-1..0] | |
185 // monitors[number_of_locks-1..0] | |
186 // | |
187 // locals is a direct copy of the interpreter frame so in the osr buffer | |
188 // so first slot in the local array is the last local from the interpreter | |
189 // and last slot is local[0] (receiver) from the interpreter | |
190 // | |
191 // Similarly with locks. The first lock slot in the osr buffer is the nth lock | |
192 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock | |
193 // in the interpreter frame (the method lock if a sync method) | |
194 | |
195 // Initialize monitors in the compiled activation. | |
196 // I0: pointer to osr buffer | |
197 // | |
198 // All other registers are dead at this point and the locals will be | |
199 // copied into place by code emitted in the IR. | |
200 | |
201 Register OSR_buf = osrBufferPointer()->as_register(); | |
202 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); | |
203 int monitor_offset = BytesPerWord * method()->max_locals() + | |
1060 | 204 (2 * BytesPerWord) * (number_of_locks - 1); |
205 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in | |
206 // the OSR buffer using 2 word entries: first the lock and then | |
207 // the oop. | |
0 | 208 for (int i = 0; i < number_of_locks; i++) { |
1060 | 209 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); |
0 | 210 #ifdef ASSERT |
211 // verify the interpreter's monitor has a non-null object | |
212 { | |
213 Label L; | |
1060 | 214 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7); |
3839 | 215 __ cmp_and_br_short(O7, G0, Assembler::notEqual, Assembler::pt, L); |
0 | 216 __ stop("locked object is NULL"); |
217 __ bind(L); | |
218 } | |
219 #endif // ASSERT | |
220 // Copy the lock field into the compiled activation. | |
1060 | 221 __ ld_ptr(OSR_buf, slot_offset + 0, O7); |
0 | 222 __ st_ptr(O7, frame_map()->address_for_monitor_lock(i)); |
1060 | 223 __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7); |
0 | 224 __ st_ptr(O7, frame_map()->address_for_monitor_object(i)); |
225 } | |
226 } | |
227 } | |
228 | |
229 | |
230 // Optimized Library calls | |
231 // This is the fast version of java.lang.String.compare; it has not | |
232 // OSR-entry and therefore, we generate a slow version for OSR's | |
233 void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) { | |
234 Register str0 = left->as_register(); | |
235 Register str1 = right->as_register(); | |
236 | |
237 Label Ldone; | |
238 | |
239 Register result = dst->as_register(); | |
240 { | |
6057 | 241 // Get a pointer to the first character of string0 in tmp0 |
242 // and get string0.length() in str0 | |
243 // Get a pointer to the first character of string1 in tmp1 | |
244 // and get string1.length() in str1 | |
245 // Also, get string0.length()-string1.length() in | |
246 // o7 and get the condition code set | |
0 | 247 // Note: some instructions have been hoisted for better instruction scheduling |
248 | |
249 Register tmp0 = L0; | |
250 Register tmp1 = L1; | |
251 Register tmp2 = L2; | |
252 | |
253 int value_offset = java_lang_String:: value_offset_in_bytes(); // char array | |
6057 | 254 if (java_lang_String::has_offset_field()) { |
255 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position | |
256 int count_offset = java_lang_String:: count_offset_in_bytes(); | |
257 __ load_heap_oop(str0, value_offset, tmp0); | |
258 __ ld(str0, offset_offset, tmp2); | |
259 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0); | |
260 __ ld(str0, count_offset, str0); | |
261 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); | |
262 } else { | |
263 __ load_heap_oop(str0, value_offset, tmp1); | |
264 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0); | |
265 __ ld(tmp1, arrayOopDesc::length_offset_in_bytes(), str0); | |
266 } | |
0 | 267 |
268 // str1 may be null | |
269 add_debug_info_for_null_check_here(info); | |
270 | |
6057 | 271 if (java_lang_String::has_offset_field()) { |
272 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position | |
273 int count_offset = java_lang_String:: count_offset_in_bytes(); | |
274 __ load_heap_oop(str1, value_offset, tmp1); | |
275 __ add(tmp0, tmp2, tmp0); | |
276 | |
277 __ ld(str1, offset_offset, tmp2); | |
278 __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1); | |
279 __ ld(str1, count_offset, str1); | |
280 __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2); | |
281 __ add(tmp1, tmp2, tmp1); | |
282 } else { | |
283 __ load_heap_oop(str1, value_offset, tmp2); | |
284 __ add(tmp2, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1); | |
285 __ ld(tmp2, arrayOopDesc::length_offset_in_bytes(), str1); | |
286 } | |
0 | 287 __ subcc(str0, str1, O7); |
288 } | |
289 | |
290 { | |
291 // Compute the minimum of the string lengths, scale it and store it in limit | |
292 Register count0 = I0; | |
293 Register count1 = I1; | |
294 Register limit = L3; | |
295 | |
296 Label Lskip; | |
297 __ sll(count0, exact_log2(sizeof(jchar)), limit); // string0 is shorter | |
298 __ br(Assembler::greater, true, Assembler::pt, Lskip); | |
299 __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit); // string1 is shorter | |
300 __ bind(Lskip); | |
301 | |
302 // If either string is empty (or both of them) the result is the difference in lengths | |
303 __ cmp(limit, 0); | |
304 __ br(Assembler::equal, true, Assembler::pn, Ldone); | |
305 __ delayed()->mov(O7, result); // result is difference in lengths | |
306 } | |
307 | |
308 { | |
309 // Neither string is empty | |
310 Label Lloop; | |
311 | |
312 Register base0 = L0; | |
313 Register base1 = L1; | |
314 Register chr0 = I0; | |
315 Register chr1 = I1; | |
316 Register limit = L3; | |
317 | |
318 // Shift base0 and base1 to the end of the arrays, negate limit | |
319 __ add(base0, limit, base0); | |
320 __ add(base1, limit, base1); | |
6057 | 321 __ neg(limit); // limit = -min{string0.length(), string1.length()} |
0 | 322 |
323 __ lduh(base0, limit, chr0); | |
324 __ bind(Lloop); | |
325 __ lduh(base1, limit, chr1); | |
326 __ subcc(chr0, chr1, chr0); | |
327 __ br(Assembler::notZero, false, Assembler::pn, Ldone); | |
328 assert(chr0 == result, "result must be pre-placed"); | |
329 __ delayed()->inccc(limit, sizeof(jchar)); | |
330 __ br(Assembler::notZero, true, Assembler::pt, Lloop); | |
331 __ delayed()->lduh(base0, limit, chr0); | |
332 } | |
333 | |
334 // If strings are equal up to min length, return the length difference. | |
335 __ mov(O7, result); | |
336 | |
337 // Otherwise, return the difference between the first mismatched chars. | |
338 __ bind(Ldone); | |
339 } | |
340 | |
341 | |
342 // -------------------------------------------------------------------------------------------- | |
343 | |
344 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) { | |
345 if (!GenerateSynchronizationCode) return; | |
346 | |
347 Register obj_reg = obj_opr->as_register(); | |
348 Register lock_reg = lock_opr->as_register(); | |
349 | |
350 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); | |
351 Register reg = mon_addr.base(); | |
352 int offset = mon_addr.disp(); | |
353 // compute pointer to BasicLock | |
354 if (mon_addr.is_simm13()) { | |
355 __ add(reg, offset, lock_reg); | |
356 } | |
357 else { | |
358 __ set(offset, lock_reg); | |
359 __ add(reg, lock_reg, lock_reg); | |
360 } | |
361 // unlock object | |
362 MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no); | |
363 // _slow_case_stubs->append(slow_case); | |
364 // temporary fix: must be created after exceptionhandler, therefore as call stub | |
365 _slow_case_stubs->append(slow_case); | |
366 if (UseFastLocking) { | |
367 // try inlined fast unlocking first, revert to slow locking if it fails | |
368 // note: lock_reg points to the displaced header since the displaced header offset is 0! | |
369 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); | |
370 __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry()); | |
371 } else { | |
372 // always do slow unlocking | |
373 // note: the slow unlocking code could be inlined here, however if we use | |
374 // slow unlocking, speed doesn't matter anyway and this solution is | |
375 // simpler and requires less duplicated code - additionally, the | |
376 // slow unlocking code is the same in either case which simplifies | |
377 // debugging | |
378 __ br(Assembler::always, false, Assembler::pt, *slow_case->entry()); | |
379 __ delayed()->nop(); | |
380 } | |
381 // done | |
382 __ bind(*slow_case->continuation()); | |
383 } | |
384 | |
385 | |
1204 | 386 int LIR_Assembler::emit_exception_handler() { |
0 | 387 // if the last instruction is a call (typically to do a throw which |
388 // is coming at the end after block reordering) the return address | |
389 // must still point into the code area in order to avoid assertion | |
390 // failures when searching for the corresponding bci => add a nop | |
391 // (was bug 5/14/1999 - gri) | |
392 __ nop(); | |
393 | |
394 // generate code for exception handler | |
395 ciMethod* method = compilation()->method(); | |
396 | |
397 address handler_base = __ start_a_stub(exception_handler_size); | |
398 | |
399 if (handler_base == NULL) { | |
400 // not enough space left for the handler | |
401 bailout("exception handler overflow"); | |
1204 | 402 return -1; |
0 | 403 } |
1204 | 404 |
0 | 405 int offset = code_offset(); |
406 | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2112
diff
changeset
|
407 __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type); |
0 | 408 __ delayed()->nop(); |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2112
diff
changeset
|
409 __ should_not_reach_here(); |
4808
898522ae3c32
7131288: COMPILE SKIPPED: deopt handler overflow (retry at different tier)
iveresov
parents:
4771
diff
changeset
|
410 guarantee(code_offset() - offset <= exception_handler_size, "overflow"); |
0 | 411 __ end_a_stub(); |
1204 | 412 |
413 return offset; | |
0 | 414 } |
415 | |
1204 | 416 |
1378
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
417 // Emit the code to remove the frame from the stack in the exception |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
418 // unwind path. |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
419 int LIR_Assembler::emit_unwind_handler() { |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
420 #ifndef PRODUCT |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
421 if (CommentedAssembly) { |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
422 _masm->block_comment("Unwind handler"); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
423 } |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
424 #endif |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
425 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
426 int offset = code_offset(); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
427 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
428 // Fetch the exception from TLS and clear out exception related thread state |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
429 __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
430 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset())); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
431 __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset())); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
432 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
433 __ bind(_unwind_handler_entry); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
434 __ verify_not_null_oop(O0); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
435 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
436 __ mov(O0, I0); // Preserve the exception |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
437 } |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
438 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
439 // Preform needed unlocking |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
440 MonitorExitStub* stub = NULL; |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
441 if (method()->is_synchronized()) { |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
442 monitor_address(0, FrameMap::I1_opr); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
443 stub = new MonitorExitStub(FrameMap::I1_opr, true, 0); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
444 __ unlock_object(I3, I2, I1, *stub->entry()); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
445 __ bind(*stub->continuation()); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
446 } |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
447 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
448 if (compilation()->env()->dtrace_method_probes()) { |
1830
a3f7f95b0165
6988018: dtrace/hotspot/MethodInvocation/MethodInvocation002 crashes with client compiler
never
parents:
1791
diff
changeset
|
449 __ mov(G2_thread, O0); |
6739
8a02ca5e5576
7195816: NPG: Crash in c1_ValueType - ShouldNotReachHere
roland
parents:
6725
diff
changeset
|
450 __ save_thread(I1); // need to preserve thread in G2 across |
8a02ca5e5576
7195816: NPG: Crash in c1_ValueType - ShouldNotReachHere
roland
parents:
6725
diff
changeset
|
451 // runtime call |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
452 metadata2reg(method()->constant_encoding(), O1); |
1378
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
453 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
454 __ delayed()->nop(); |
6739
8a02ca5e5576
7195816: NPG: Crash in c1_ValueType - ShouldNotReachHere
roland
parents:
6725
diff
changeset
|
455 __ restore_thread(I1); |
1378
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
456 } |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
457 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
458 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
459 __ mov(I0, O0); // Restore the exception |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
460 } |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
461 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
462 // dispatch to the unwind logic |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
463 __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
464 __ delayed()->nop(); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
465 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
466 // Emit the slow path assembly |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
467 if (stub != NULL) { |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
468 stub->emit_code(this); |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
469 } |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
470 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
471 return offset; |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
472 } |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
473 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
474 |
1204 | 475 int LIR_Assembler::emit_deopt_handler() { |
0 | 476 // if the last instruction is a call (typically to do a throw which |
477 // is coming at the end after block reordering) the return address | |
478 // must still point into the code area in order to avoid assertion | |
479 // failures when searching for the corresponding bci => add a nop | |
480 // (was bug 5/14/1999 - gri) | |
481 __ nop(); | |
482 | |
483 // generate code for deopt handler | |
484 ciMethod* method = compilation()->method(); | |
485 address handler_base = __ start_a_stub(deopt_handler_size); | |
486 if (handler_base == NULL) { | |
487 // not enough space left for the handler | |
488 bailout("deopt handler overflow"); | |
1204 | 489 return -1; |
0 | 490 } |
1204 | 491 |
0 | 492 int offset = code_offset(); |
727 | 493 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); |
494 __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp | |
0 | 495 __ delayed()->nop(); |
4808
898522ae3c32
7131288: COMPILE SKIPPED: deopt handler overflow (retry at different tier)
iveresov
parents:
4771
diff
changeset
|
496 guarantee(code_offset() - offset <= deopt_handler_size, "overflow"); |
0 | 497 __ end_a_stub(); |
1204 | 498 |
499 return offset; | |
0 | 500 } |
501 | |
502 | |
503 void LIR_Assembler::jobject2reg(jobject o, Register reg) { | |
504 if (o == NULL) { | |
505 __ set(NULL_WORD, reg); | |
506 } else { | |
507 int oop_index = __ oop_recorder()->find_index(o); | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
508 assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(o)), "should be real oop"); |
0 | 509 RelocationHolder rspec = oop_Relocation::spec(oop_index); |
510 __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created | |
511 } | |
512 } | |
513 | |
514 | |
515 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
516 // Allocate a new index in table to hold the object once it's been patched |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
517 int oop_index = __ oop_recorder()->allocate_oop_index(NULL); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
518 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index); |
0 | 519 |
727 | 520 AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index)); |
521 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); | |
0 | 522 // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the |
523 // NULL will be dynamically patched later and the patched value may be large. We must | |
524 // therefore generate the sethi/add as a placeholders | |
727 | 525 __ patchable_set(addrlit, reg); |
0 | 526 |
527 patching_epilog(patch, lir_patch_normal, reg, info); | |
528 } | |
529 | |
530 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
531 void LIR_Assembler::metadata2reg(Metadata* o, Register reg) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
532 __ set_metadata_constant(o, reg); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
533 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
534 |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
// Load a to-be-patched Klass* into 'reg'.  Emits a patchable sethi/add
// placeholder and a PatchingStub (load_klass_id) that fills in the real
// klass at patch time; 'info' carries the debug state for the patch site.
void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit(NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc");
  // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
  // NULL will be dynamically patched later and the patched value may be large.  We must
  // therefore generate the sethi/add as placeholders
  __ patchable_set(addrlit, reg);

  patching_epilog(patch, lir_patch_normal, reg, info);
}
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
548 |
// Emit code for a LIR_Op3: 32-bit integer division (lir_idiv) or
// remainder (lir_irem).  op->in_opr3() supplies a scratch register which
// must differ from both dividend and divisor.
void LIR_Assembler::emit_op3(LIR_Op3* op) {
  Register Rdividend = op->in_opr1()->as_register();
  Register Rdivisor = noreg;
  Register Rscratch = op->in_opr3()->as_register();
  Register Rresult = op->result_opr()->as_register();
  int divisor = -1;

  // The divisor is either a register or a simm13 constant.
  if (op->in_opr2()->is_register()) {
    Rdivisor = op->in_opr2()->as_register();
  } else {
    divisor = op->in_opr2()->as_constant_ptr()->as_jint();
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg && is_power_of_2(divisor)) {
    // convert division by a power of two into some shifts and logical operations
    if (op->code() == lir_idiv) {
      // Round-towards-zero divide: bias negative dividends by (divisor - 1)
      // (for divisor == 2 the sign bit itself is the bias), then arithmetic shift.
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1, Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ sra(Rscratch, log2_intptr(divisor), Rresult);
      return;
    } else {
      // Remainder: round the dividend towards zero to a multiple of the
      // divisor (add bias, clear low bits), then subtract from the dividend.
      if (divisor == 2) {
        __ srl(Rdividend, 31, Rscratch);
      } else {
        __ sra(Rdividend, 31, Rscratch);
        __ and3(Rscratch, divisor - 1,Rscratch);
      }
      __ add(Rdividend, Rscratch, Rscratch);
      __ andn(Rscratch, divisor - 1,Rscratch);
      __ sub(Rdividend, Rscratch, Rresult);
      return;
    }
  }

  // General case: sign-extend the dividend into the Y register (high 32
  // bits of the 64-bit dividend for sdivcc).
  __ sra(Rdividend, 31, Rscratch);
  __ wry(Rscratch);
  if (!VM_Version::v9_instructions_work()) {
    // v9 doesn't require these nops
    __ nop();
    __ nop();
    __ nop();
    __ nop();
  }

  // The divide may trap on a zero divisor; record debug info at this PC.
  add_debug_info_for_div0_here(op->info());

  if (Rdivisor != noreg) {
    __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  } else {
    assert(Assembler::is_simm13(divisor), "can only handle simm13");
    __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
  }

  // sdivcc sets the overflow flag for min_jint / -1; force the Java-defined
  // result 0x80000000 in that case.  The sethi sits in an annulled delay
  // slot and only executes when the branch is taken.
  Label skip;
  __ br(Assembler::overflowSet, true, Assembler::pn, skip);
  __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
  __ bind(skip);

  if (op->code() == lir_irem) {
    // remainder = dividend - quotient * divisor (quotient is in Rscratch)
    if (Rdivisor != noreg) {
      __ smul(Rscratch, Rdivisor, Rscratch);
    } else {
      __ smul(Rscratch, divisor, Rscratch);
    }
    __ sub(Rdividend, Rscratch, Rresult);
  }
}
626 | |
627 | |
// Emit a conditional or unconditional branch.  The delay slot is left
// empty here; the peephole pass fills it afterwards.
void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL) _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
#endif
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");

  if (op->cond() == lir_cond_always) {
    __ br(Assembler::always, false, Assembler::pt, *(op->label()));
  } else if (op->code() == lir_cond_float_branch) {
    assert(op->ublock() != NULL, "must have unordered successor");
    // When the unordered successor is the same block as the taken branch,
    // fold the unordered case into the float condition itself.
    bool is_unordered = (op->ublock() == op->block());
    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal: acond = Assembler::f_equal; break;
      case lir_cond_notEqual: acond = Assembler::f_notEqual; break;
      case lir_cond_less: acond = (is_unordered ? Assembler::f_unorderedOrLess : Assembler::f_less); break;
      case lir_cond_greater: acond = (is_unordered ? Assembler::f_unorderedOrGreater : Assembler::f_greater); break;
      case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break;
      case lir_cond_greaterEqual: acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
      default : ShouldNotReachHere();
    };

    if (!VM_Version::v9_instructions_work()) {
      __ nop();
    }
    __ fb( acond, false, Assembler::pn, *(op->label()));
  } else {
    assert (op->code() == lir_branch, "just checking");

    Assembler::Condition acond;
    switch (op->cond()) {
      case lir_cond_equal: acond = Assembler::equal; break;
      case lir_cond_notEqual: acond = Assembler::notEqual; break;
      case lir_cond_less: acond = Assembler::less; break;
      case lir_cond_lessEqual: acond = Assembler::lessEqual; break;
      case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
      case lir_cond_greater: acond = Assembler::greater; break;
      case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break;
      case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break;
      default: ShouldNotReachHere();
    };

    // sparc has different condition codes for testing 32-bit
    // vs. 64-bit values.  We could always test xcc if we could
    // guarantee that 32-bit loads always sign extended but that isn't
    // true and since sign extension isn't free, it would impose a
    // slight cost.
#ifdef _LP64
    if (op->type() == T_INT) {
      __ br(acond, false, Assembler::pn, *(op->label()));
    } else
#endif
      __ brx(acond, false, Assembler::pn, *(op->label()));
  }
  // The peephole pass fills the delay slot
}
686 | |
687 | |
// Emit code for a primitive conversion bytecode (i2l, i2f/i2d, f2i, l2i,
// f2d/d2f, i2b/i2s/i2c).  Other conversions are not handled here.
void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      Register rlo = dst->as_register_lo();
      Register rhi = dst->as_register_hi();
      Register rval = op->in_opr()->as_register();
#ifdef _LP64
      // sign-extend the 32-bit value into the full 64-bit register
      __ sra(rval, 0, rlo);
#else
      __ mov(rval, rlo);
      __ sra(rval, BitsPerInt-1, rhi);
#endif
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_i2f: {
      bool is_double = (code == Bytecodes::_i2d);
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      // The int source has already been staged into a float register.
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      if (rsrc != rdst) {
        __ fmov(FloatRegisterImpl::S, rsrc, rdst);
      }
      __ fitof(w, rdst, rdst);
      break;
    }
    case Bytecodes::_f2i:{
      FloatRegister rsrc = op->in_opr()->as_float_reg();
      Address addr = frame_map()->address_for_slot(dst->single_stack_ix());
      Label L;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, L);
      __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
      __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
      // move integer result from float register to int register
      __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
      __ bind (L);
      break;
    }
    case Bytecodes::_l2i: {
      Register rlo = op->in_opr()->as_register_lo();
      Register rhi = op->in_opr()->as_register_hi();
      Register rdst = dst->as_register();
#ifdef _LP64
      // truncate to 32 bits and sign-extend the result
      __ sra(rlo, 0, rdst);
#else
      __ mov(rlo, rdst);
#endif
      break;
    }
    case Bytecodes::_d2f:
    case Bytecodes::_f2d: {
      bool is_double = (code == Bytecodes::_f2d);
      assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
      LIR_Opr val = op->in_opr();
      FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
      FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
      FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
      FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
      __ ftof(vw, dw, rval, rdst);
      break;
    }
    case Bytecodes::_i2s:
    case Bytecodes::_i2b: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      // shift left then arithmetic-shift right to sign-extend byte/short
      int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
      __ sll (rval, shift, rdst);
      __ sra (rdst, shift, rdst);
      break;
    }
    case Bytecodes::_i2c: {
      Register rval = op->in_opr()->as_register();
      Register rdst = dst->as_register();
      // char is unsigned: shift left then logical-shift right to zero-extend
      int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
      __ sll (rval, shift, rdst);
      __ srl (rdst, shift, rdst);
      break;
    }

    default: ShouldNotReachHere();
  }
}
778 | |
779 | |
// No call-site alignment needed on this platform.
void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on sparc
}
783 | |
784 | |
// Emit a direct (static/optimized) Java call with the given relocation type.
void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  __ call(op->addr(), rtype);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}
790 | |
791 | |
// Emit an inline-cache dispatched Java call.
void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ ic_call(op->addr(), false);
  // The peephole pass fills the delay slot, add_call_info is done in
  // LIR_Assembler::emit_delay.
}
797 | |
798 | |
// Emit a virtual call through the receiver's vtable.  The receiver is in
// O0; its klass is loaded into G3_scratch, the target Method* into
// G5_method, and the call goes through the method's from_compiled entry.
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  // The klass load below doubles as the implicit null check on the receiver.
  add_debug_info_for_null_check_here(op->info());
  __ load_klass(O0, G3_scratch);
  if (Assembler::is_simm13(op->vtable_offset())) {
    __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
  } else {
    // This will generate 2 instructions
    __ set(op->vtable_offset(), G5_method);
    // ld_ptr, set_hi, set
    __ ld_ptr(G3_scratch, G5_method, G5_method);
  }
  __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
  __ callr(G3_scratch, G0);
  // the peephole pass fills the delay slot
}
814 | |
2002 | 815 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) { |
0 | 816 int store_offset; |
817 if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) { | |
818 assert(!unaligned, "can't handle this"); | |
819 // for offsets larger than a simm13 we setup the offset in O7 | |
727 | 820 __ set(offset, O7); |
2002 | 821 store_offset = store(from_reg, base, O7, type, wide); |
0 | 822 } else { |
2002 | 823 if (type == T_ARRAY || type == T_OBJECT) { |
824 __ verify_oop(from_reg->as_register()); | |
825 } | |
0 | 826 store_offset = code_offset(); |
827 switch (type) { | |
828 case T_BOOLEAN: // fall through | |
829 case T_BYTE : __ stb(from_reg->as_register(), base, offset); break; | |
830 case T_CHAR : __ sth(from_reg->as_register(), base, offset); break; | |
831 case T_SHORT : __ sth(from_reg->as_register(), base, offset); break; | |
832 case T_INT : __ stw(from_reg->as_register(), base, offset); break; | |
833 case T_LONG : | |
834 #ifdef _LP64 | |
835 if (unaligned || PatchALot) { | |
836 __ srax(from_reg->as_register_lo(), 32, O7); | |
837 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes); | |
838 __ stw(O7, base, offset + hi_word_offset_in_bytes); | |
839 } else { | |
840 __ stx(from_reg->as_register_lo(), base, offset); | |
841 } | |
842 #else | |
843 assert(Assembler::is_simm13(offset + 4), "must be"); | |
844 __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes); | |
845 __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes); | |
846 #endif | |
847 break; | |
2002 | 848 case T_ADDRESS: |
6739
8a02ca5e5576
7195816: NPG: Crash in c1_ValueType - ShouldNotReachHere
roland
parents:
6725
diff
changeset
|
849 case T_METADATA: |
2002 | 850 __ st_ptr(from_reg->as_register(), base, offset); |
851 break; | |
0 | 852 case T_ARRAY : // fall through |
2002 | 853 case T_OBJECT: |
854 { | |
855 if (UseCompressedOops && !wide) { | |
856 __ encode_heap_oop(from_reg->as_register(), G3_scratch); | |
857 store_offset = code_offset(); | |
858 __ stw(G3_scratch, base, offset); | |
859 } else { | |
860 __ st_ptr(from_reg->as_register(), base, offset); | |
861 } | |
862 break; | |
863 } | |
864 | |
0 | 865 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break; |
866 case T_DOUBLE: | |
867 { | |
868 FloatRegister reg = from_reg->as_double_reg(); | |
869 // split unaligned stores | |
870 if (unaligned || PatchALot) { | |
871 assert(Assembler::is_simm13(offset + 4), "must be"); | |
872 __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4); | |
873 __ stf(FloatRegisterImpl::S, reg, base, offset); | |
874 } else { | |
875 __ stf(FloatRegisterImpl::D, reg, base, offset); | |
876 } | |
877 break; | |
878 } | |
879 default : ShouldNotReachHere(); | |
880 } | |
881 } | |
882 return store_offset; | |
883 } | |
884 | |
885 | |
// Store from_reg to [base + disp] (register-displacement form).  Returns
// the code offset of the store instruction.  'wide' == false with
// UseCompressedOops makes oop stores use the 32-bit compressed form.
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from_reg->as_register());
  }
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE : __ stb(from_reg->as_register(), base, disp); break;
    case T_CHAR : __ sth(from_reg->as_register(), base, disp); break;
    case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
    case T_INT : __ stw(from_reg->as_register(), base, disp); break;
    case T_LONG :
#ifdef _LP64
      __ stx(from_reg->as_register_lo(), base, disp);
#else
      assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
      __ std(from_reg->as_register_hi(), base, disp);
#endif
      break;
    case T_ADDRESS:
      __ st_ptr(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
          // re-read the offset so it names the actual memory access,
          // not the encode sequence
          store_offset = code_offset();
          __ stw(G3_scratch, base, disp);
        } else {
          __ st_ptr(from_reg->as_register(), base, disp);
        }
        break;
      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default : ShouldNotReachHere();
  }
  return store_offset;
}
926 | |
927 | |
2002 | 928 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) { |
0 | 929 int load_offset; |
930 if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) { | |
931 assert(base != O7, "destroying register"); | |
932 assert(!unaligned, "can't handle this"); | |
933 // for offsets larger than a simm13 we setup the offset in O7 | |
727 | 934 __ set(offset, O7); |
2002 | 935 load_offset = load(base, O7, to_reg, type, wide); |
0 | 936 } else { |
937 load_offset = code_offset(); | |
938 switch(type) { | |
939 case T_BOOLEAN: // fall through | |
940 case T_BYTE : __ ldsb(base, offset, to_reg->as_register()); break; | |
941 case T_CHAR : __ lduh(base, offset, to_reg->as_register()); break; | |
942 case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break; | |
943 case T_INT : __ ld(base, offset, to_reg->as_register()); break; | |
944 case T_LONG : | |
945 if (!unaligned) { | |
946 #ifdef _LP64 | |
947 __ ldx(base, offset, to_reg->as_register_lo()); | |
948 #else | |
949 assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(), | |
950 "must be sequential"); | |
951 __ ldd(base, offset, to_reg->as_register_hi()); | |
952 #endif | |
953 } else { | |
954 #ifdef _LP64 | |
955 assert(base != to_reg->as_register_lo(), "can't handle this"); | |
1060 | 956 assert(O7 != to_reg->as_register_lo(), "can't handle this"); |
0 | 957 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo()); |
1060 | 958 __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last |
0 | 959 __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo()); |
1060 | 960 __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo()); |
0 | 961 #else |
962 if (base == to_reg->as_register_lo()) { | |
963 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi()); | |
964 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo()); | |
965 } else { | |
966 __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo()); | |
967 __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi()); | |
968 } | |
969 #endif | |
970 } | |
971 break; | |
6739
8a02ca5e5576
7195816: NPG: Crash in c1_ValueType - ShouldNotReachHere
roland
parents:
6725
diff
changeset
|
972 case T_METADATA: |
2002 | 973 case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break; |
0 | 974 case T_ARRAY : // fall through |
2002 | 975 case T_OBJECT: |
976 { | |
977 if (UseCompressedOops && !wide) { | |
978 __ lduw(base, offset, to_reg->as_register()); | |
979 __ decode_heap_oop(to_reg->as_register()); | |
980 } else { | |
981 __ ld_ptr(base, offset, to_reg->as_register()); | |
982 } | |
983 break; | |
984 } | |
0 | 985 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break; |
986 case T_DOUBLE: | |
987 { | |
988 FloatRegister reg = to_reg->as_double_reg(); | |
989 // split unaligned loads | |
990 if (unaligned || PatchALot) { | |
1060 | 991 __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor()); |
992 __ ldf(FloatRegisterImpl::S, base, offset, reg); | |
0 | 993 } else { |
994 __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg()); | |
995 } | |
996 break; | |
997 } | |
998 default : ShouldNotReachHere(); | |
999 } | |
2002 | 1000 if (type == T_ARRAY || type == T_OBJECT) { |
1001 __ verify_oop(to_reg->as_register()); | |
1002 } | |
0 | 1003 } |
1004 return load_offset; | |
1005 } | |
1006 | |
1007 | |
// Load [base + disp] into to_reg (register-displacement form).  Returns
// the code offset of the load instruction.  'wide' == false with
// UseCompressedOops makes oop loads read the 32-bit compressed form.
int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break;
    case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
    case T_INT : __ ld(base, disp, to_reg->as_register()); break;
    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lduw(base, disp, to_reg->as_register());
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld_ptr(base, disp, to_reg->as_register());
        }
        break;
      }
    case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG :
#ifdef _LP64
      __ ldx(base, disp, to_reg->as_register_lo());
#else
      assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
             "must be sequential");
      __ ldd(base, disp, to_reg->as_register_hi());
#endif
      break;
    default : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(to_reg->as_register());
  }
  return load_offset;
}
1046 | |
// Store a constant into a stack slot.  Zero constants are stored straight
// from G0; any other value is materialized in O7 first.
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
      if (value == 0) {
        src_reg = G0;
      } else {
        __ set(value, O7);
      }
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      // pointer-sized store, unlike the 32-bit stw above
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_OBJECT: {
      Register src_reg = O7;
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ st_ptr(src_reg, addr.base(), addr.disp());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      // store the constant one 32-bit word at a time
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());

      Register tmp = O7;
      int value_lo = c->as_jint_lo_bits();
      if (value_lo == 0) {
        tmp = G0;
      } else {
        __ set(value_lo, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
      int value_hi = c->as_jint_hi_bits();
      if (value_hi == 0) {
        tmp = G0;
      } else {
        // NOTE(review): if value_lo was 0 and value_hi is not, 'tmp' is
        // still G0 here while value_hi sits in O7, so the hi word appears
        // to be stored as 0.  Confirm whether mixed zero/non-zero word
        // constants can reach this path.
        __ set(value_hi, O7);
      }
      __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
      break;
    }
    default:
      Unimplemented();
  }
}
1107 | |
1108 | |
2002 | 1109 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { |
0 | 1110 LIR_Const* c = src->as_constant_ptr(); |
1111 LIR_Address* addr = dest->as_address_ptr(); | |
1112 Register base = addr->base()->as_pointer_register(); | |
2002 | 1113 int offset = -1; |
1114 | |
0 | 1115 switch (c->type()) { |
1116 case T_INT: | |
1297
c466efa608d5
6932496: c1: deoptimization of jsr subroutine fails on sparcv9
roland
parents:
1295
diff
changeset
|
1117 case T_FLOAT: |
c466efa608d5
6932496: c1: deoptimization of jsr subroutine fails on sparcv9
roland
parents:
1295
diff
changeset
|
1118 case T_ADDRESS: { |
0 | 1119 LIR_Opr tmp = FrameMap::O7_opr; |
1120 int value = c->as_jint_bits(); | |
1121 if (value == 0) { | |
1122 tmp = FrameMap::G0_opr; | |
1123 } else if (Assembler::is_simm13(value)) { | |
1124 __ set(value, O7); | |
1125 } | |
1126 if (addr->index()->is_valid()) { | |
1127 assert(addr->disp() == 0, "must be zero"); | |
2002 | 1128 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide); |
0 | 1129 } else { |
1130 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses"); | |
2002 | 1131 offset = store(tmp, base, addr->disp(), type, wide, false); |
0 | 1132 } |
1133 break; | |
1134 } | |
1135 case T_LONG: | |
1136 case T_DOUBLE: { | |
1137 assert(!addr->index()->is_valid(), "can't handle reg reg address here"); | |
1138 assert(Assembler::is_simm13(addr->disp()) && | |
1139 Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses"); | |
1140 | |
2002 | 1141 LIR_Opr tmp = FrameMap::O7_opr; |
0 | 1142 int value_lo = c->as_jint_lo_bits(); |
1143 if (value_lo == 0) { | |
2002 | 1144 tmp = FrameMap::G0_opr; |
0 | 1145 } else { |
1146 __ set(value_lo, O7); | |
1147 } | |
2002 | 1148 offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false); |
0 | 1149 int value_hi = c->as_jint_hi_bits(); |
1150 if (value_hi == 0) { | |
2002 | 1151 tmp = FrameMap::G0_opr; |
0 | 1152 } else { |
1153 __ set(value_hi, O7); | |
1154 } | |
4052 | 1155 store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false); |
0 | 1156 break; |
1157 } | |
1158 case T_OBJECT: { | |
1159 jobject obj = c->as_jobject(); | |
1160 LIR_Opr tmp; | |
1161 if (obj == NULL) { | |
1162 tmp = FrameMap::G0_opr; | |
1163 } else { | |
1164 tmp = FrameMap::O7_opr; | |
1165 jobject2reg(c->as_jobject(), O7); | |
1166 } | |
1167 // handle either reg+reg or reg+disp address | |
1168 if (addr->index()->is_valid()) { | |
1169 assert(addr->disp() == 0, "must be zero"); | |
2002 | 1170 offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide); |
0 | 1171 } else { |
1172 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses"); | |
2002 | 1173 offset = store(tmp, base, addr->disp(), type, wide, false); |
0 | 1174 } |
1175 | |
1176 break; | |
1177 } | |
1178 default: | |
1179 Unimplemented(); | |
1180 } | |
2002 | 1181 if (info != NULL) { |
1182 assert(offset != -1, "offset should've been set"); | |
1183 add_debug_info_for_null_check(offset, info); | |
1184 } | |
0 | 1185 } |
1186 | |
1187 | |
// Load a constant into a register.  Integer/long/address constants are
// materialized with set(); object and metadata constants may go through
// the patching machinery; float/double constants are loaded from the
// nmethod's constant section via an internal-word relocation.
void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT:
    case T_ADDRESS:
      {
        jint con = c->as_jint();
        if (to_reg->is_single_cpu()) {
          assert(patch_code == lir_patch_none, "no patching handled here");
          __ set(con, to_reg->as_register());
        } else {
          // dead path: int constants are not expected in FP registers
          ShouldNotReachHere();
          assert(to_reg->is_single_fpu(), "wrong register kind");

          __ set(con, O7);
          Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
          __ st(O7, temp_slot);
          __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
        }
      }
      break;

    case T_LONG:
      {
        jlong con = c->as_jlong();

        if (to_reg->is_double_cpu()) {
#ifdef _LP64
          __ set(con, to_reg->as_register_lo());
#else
          __ set(low(con), to_reg->as_register_lo());
          __ set(high(con), to_reg->as_register_hi());
#endif
#ifdef _LP64
        } else if (to_reg->is_single_cpu()) {
          __ set(con, to_reg->as_register());
#endif
        } else {
          // dead path: long constants are not expected in FP registers
          ShouldNotReachHere();
          assert(to_reg->is_double_fpu(), "wrong register kind");
          Address temp_slot_lo(SP, ((frame::register_save_words ) * wordSize) + STACK_BIAS);
          Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
          __ set(low(con), O7);
          __ st(O7, temp_slot_lo);
          __ set(high(con), O7);
          __ st(O7, temp_slot_hi);
          __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
        }
      }
      break;

    case T_OBJECT:
      {
        if (patch_code == lir_patch_none) {
          jobject2reg(c->as_jobject(), to_reg->as_register());
        } else {
          jobject2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        address const_addr = __ float_constant(c->as_jfloat());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
        AddressLiteral const_addrlit(const_addr, rspec);
        if (to_reg->is_single_fpu()) {
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());

        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");

          // load the raw float bits into an integer register
          __ set(const_addrlit, O7);
          __ ld(O7, 0, to_reg->as_register());
        }
      }
      break;

    case T_DOUBLE:
      {
        address const_addr = __ double_constant(c->as_jdouble());
        if (const_addr == NULL) {
          bailout("const section overflow");
          break;
        }
        RelocationHolder rspec = internal_word_Relocation::spec(const_addr);

        if (to_reg->is_double_fpu()) {
          AddressLiteral const_addrlit(const_addr, rspec);
          __ patchable_sethi(const_addrlit, O7);
          __ relocate(rspec);
          __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          // materialize the raw double bits directly in integer register(s)
#ifdef _LP64
          __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
#else
          __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
          __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
#endif
        }

      }
      break;

    default:
      ShouldNotReachHere();
  }
}
1315 | |
1316 Address LIR_Assembler::as_Address(LIR_Address* addr) { | |
1317 Register reg = addr->base()->as_register(); | |
727 | 1318 return Address(reg, addr->disp()); |
0 | 1319 } |
1320 | |
1321 | |
1322 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { | |
1323 switch (type) { | |
1324 case T_INT: | |
1325 case T_FLOAT: { | |
1326 Register tmp = O7; | |
1327 Address from = frame_map()->address_for_slot(src->single_stack_ix()); | |
1328 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); | |
1329 __ lduw(from.base(), from.disp(), tmp); | |
1330 __ stw(tmp, to.base(), to.disp()); | |
1331 break; | |
1332 } | |
1333 case T_OBJECT: { | |
1334 Register tmp = O7; | |
1335 Address from = frame_map()->address_for_slot(src->single_stack_ix()); | |
1336 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); | |
1337 __ ld_ptr(from.base(), from.disp(), tmp); | |
1338 __ st_ptr(tmp, to.base(), to.disp()); | |
1339 break; | |
1340 } | |
1341 case T_LONG: | |
1342 case T_DOUBLE: { | |
1343 Register tmp = O7; | |
1344 Address from = frame_map()->address_for_double_slot(src->double_stack_ix()); | |
1345 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix()); | |
1346 __ lduw(from.base(), from.disp(), tmp); | |
1347 __ stw(tmp, to.base(), to.disp()); | |
1348 __ lduw(from.base(), from.disp() + 4, tmp); | |
1349 __ stw(tmp, to.base(), to.disp() + 4); | |
1350 break; | |
1351 } | |
1352 | |
1353 default: | |
1354 ShouldNotReachHere(); | |
1355 } | |
1356 } | |
1357 | |
1358 | |
1359 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { | |
1360 Address base = as_Address(addr); | |
727 | 1361 return Address(base.base(), base.disp() + hi_word_offset_in_bytes); |
0 | 1362 } |
1363 | |
1364 | |
1365 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { | |
1366 Address base = as_Address(addr); | |
727 | 1367 return Address(base.base(), base.disp() + lo_word_offset_in_bytes); |
0 | 1368 } |
1369 | |
1370 | |
1371 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, | |
2002 | 1372 LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) { |
0 | 1373 |
6739
8a02ca5e5576
7195816: NPG: Crash in c1_ValueType - ShouldNotReachHere
roland
parents:
6725
diff
changeset
|
1374 assert(type != T_METADATA, "load of metadata ptr not supported"); |
0 | 1375 LIR_Address* addr = src_opr->as_address_ptr(); |
1376 LIR_Opr to_reg = dest; | |
1377 | |
1378 Register src = addr->base()->as_pointer_register(); | |
1379 Register disp_reg = noreg; | |
1380 int disp_value = addr->disp(); | |
1381 bool needs_patching = (patch_code != lir_patch_none); | |
1382 | |
1383 if (addr->base()->type() == T_OBJECT) { | |
1384 __ verify_oop(src); | |
1385 } | |
1386 | |
1387 PatchingStub* patch = NULL; | |
1388 if (needs_patching) { | |
1389 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1390 assert(!to_reg->is_double_cpu() || | |
1391 patch_code == lir_patch_none || | |
1392 patch_code == lir_patch_normal, "patching doesn't match register"); | |
1393 } | |
1394 | |
1395 if (addr->index()->is_illegal()) { | |
1396 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { | |
1397 if (needs_patching) { | |
727 | 1398 __ patchable_set(0, O7); |
0 | 1399 } else { |
1400 __ set(disp_value, O7); | |
1401 } | |
1402 disp_reg = O7; | |
1403 } | |
1404 } else if (unaligned || PatchALot) { | |
1405 __ add(src, addr->index()->as_register(), O7); | |
1406 src = O7; | |
1407 } else { | |
1408 disp_reg = addr->index()->as_pointer_register(); | |
1409 assert(disp_value == 0, "can't handle 3 operand addresses"); | |
1410 } | |
1411 | |
1412 // remember the offset of the load. The patching_epilog must be done | |
1413 // before the call to add_debug_info, otherwise the PcDescs don't get | |
1414 // entered in increasing order. | |
1415 int offset = code_offset(); | |
1416 | |
1417 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); | |
1418 if (disp_reg == noreg) { | |
2002 | 1419 offset = load(src, disp_value, to_reg, type, wide, unaligned); |
0 | 1420 } else { |
1421 assert(!unaligned, "can't handle this"); | |
2002 | 1422 offset = load(src, disp_reg, to_reg, type, wide); |
0 | 1423 } |
1424 | |
1425 if (patch != NULL) { | |
1426 patching_epilog(patch, patch_code, src, info); | |
1427 } | |
1428 if (info != NULL) add_debug_info_for_null_check(offset, info); | |
1429 } | |
1430 | |
1431 | |
1432 void LIR_Assembler::prefetchr(LIR_Opr src) { | |
1433 LIR_Address* addr = src->as_address_ptr(); | |
1434 Address from_addr = as_Address(addr); | |
1435 | |
1436 if (VM_Version::has_v9()) { | |
1437 __ prefetch(from_addr, Assembler::severalReads); | |
1438 } | |
1439 } | |
1440 | |
1441 | |
1442 void LIR_Assembler::prefetchw(LIR_Opr src) { | |
1443 LIR_Address* addr = src->as_address_ptr(); | |
1444 Address from_addr = as_Address(addr); | |
1445 | |
1446 if (VM_Version::has_v9()) { | |
1447 __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads); | |
1448 } | |
1449 } | |
1450 | |
1451 | |
1452 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { | |
1453 Address addr; | |
1454 if (src->is_single_word()) { | |
1455 addr = frame_map()->address_for_slot(src->single_stack_ix()); | |
1456 } else if (src->is_double_word()) { | |
1457 addr = frame_map()->address_for_double_slot(src->double_stack_ix()); | |
1458 } | |
1459 | |
1460 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; | |
2002 | 1461 load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned); |
0 | 1462 } |
1463 | |
1464 | |
1465 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { | |
1466 Address addr; | |
1467 if (dest->is_single_word()) { | |
1468 addr = frame_map()->address_for_slot(dest->single_stack_ix()); | |
1469 } else if (dest->is_double_word()) { | |
1470 addr = frame_map()->address_for_slot(dest->double_stack_ix()); | |
1471 } | |
1472 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; | |
2002 | 1473 store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned); |
0 | 1474 } |
1475 | |
1476 | |
1477 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) { | |
1478 if (from_reg->is_float_kind() && to_reg->is_float_kind()) { | |
1479 if (from_reg->is_double_fpu()) { | |
1480 // double to double moves | |
1481 assert(to_reg->is_double_fpu(), "should match"); | |
1482 __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg()); | |
1483 } else { | |
1484 // float to float moves | |
1485 assert(to_reg->is_single_fpu(), "should match"); | |
1486 __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg()); | |
1487 } | |
1488 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) { | |
1489 if (from_reg->is_double_cpu()) { | |
1490 #ifdef _LP64 | |
1491 __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register()); | |
1492 #else | |
1493 assert(to_reg->is_double_cpu() && | |
1494 from_reg->as_register_hi() != to_reg->as_register_lo() && | |
1495 from_reg->as_register_lo() != to_reg->as_register_hi(), | |
1496 "should both be long and not overlap"); | |
1497 // long to long moves | |
1498 __ mov(from_reg->as_register_hi(), to_reg->as_register_hi()); | |
1499 __ mov(from_reg->as_register_lo(), to_reg->as_register_lo()); | |
1500 #endif | |
1501 #ifdef _LP64 | |
1502 } else if (to_reg->is_double_cpu()) { | |
1503 // int to int moves | |
1504 __ mov(from_reg->as_register(), to_reg->as_register_lo()); | |
1505 #endif | |
1506 } else { | |
1507 // int to int moves | |
1508 __ mov(from_reg->as_register(), to_reg->as_register()); | |
1509 } | |
1510 } else { | |
1511 ShouldNotReachHere(); | |
1512 } | |
1513 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) { | |
1514 __ verify_oop(to_reg->as_register()); | |
1515 } | |
1516 } | |
1517 | |
1518 | |
1519 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type, | |
1520 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, | |
2002 | 1521 bool wide, bool unaligned) { |
6739
8a02ca5e5576
7195816: NPG: Crash in c1_ValueType - ShouldNotReachHere
roland
parents:
6725
diff
changeset
|
1522 assert(type != T_METADATA, "store of metadata ptr not supported"); |
0 | 1523 LIR_Address* addr = dest->as_address_ptr(); |
1524 | |
1525 Register src = addr->base()->as_pointer_register(); | |
1526 Register disp_reg = noreg; | |
1527 int disp_value = addr->disp(); | |
1528 bool needs_patching = (patch_code != lir_patch_none); | |
1529 | |
1530 if (addr->base()->is_oop_register()) { | |
1531 __ verify_oop(src); | |
1532 } | |
1533 | |
1534 PatchingStub* patch = NULL; | |
1535 if (needs_patching) { | |
1536 patch = new PatchingStub(_masm, PatchingStub::access_field_id); | |
1537 assert(!from_reg->is_double_cpu() || | |
1538 patch_code == lir_patch_none || | |
1539 patch_code == lir_patch_normal, "patching doesn't match register"); | |
1540 } | |
1541 | |
1542 if (addr->index()->is_illegal()) { | |
1543 if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) { | |
1544 if (needs_patching) { | |
727 | 1545 __ patchable_set(0, O7); |
0 | 1546 } else { |
1547 __ set(disp_value, O7); | |
1548 } | |
1549 disp_reg = O7; | |
1550 } | |
1551 } else if (unaligned || PatchALot) { | |
1552 __ add(src, addr->index()->as_register(), O7); | |
1553 src = O7; | |
1554 } else { | |
1555 disp_reg = addr->index()->as_pointer_register(); | |
1556 assert(disp_value == 0, "can't handle 3 operand addresses"); | |
1557 } | |
1558 | |
1559 // remember the offset of the store. The patching_epilog must be done | |
1560 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get | |
1561 // entered in increasing order. | |
1562 int offset; | |
1563 | |
1564 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); | |
1565 if (disp_reg == noreg) { | |
2002 | 1566 offset = store(from_reg, src, disp_value, type, wide, unaligned); |
0 | 1567 } else { |
1568 assert(!unaligned, "can't handle this"); | |
2002 | 1569 offset = store(from_reg, src, disp_reg, type, wide); |
0 | 1570 } |
1571 | |
1572 if (patch != NULL) { | |
1573 patching_epilog(patch, patch_code, src, info); | |
1574 } | |
1575 | |
1576 if (info != NULL) add_debug_info_for_null_check(offset, info); | |
1577 } | |
1578 | |
1579 | |
1580 void LIR_Assembler::return_op(LIR_Opr result) { | |
1581 // the poll may need a register so just pick one that isn't the return register | |
1783 | 1582 #if defined(TIERED) && !defined(_LP64) |
0 | 1583 if (result->type_field() == LIR_OprDesc::long_type) { |
1584 // Must move the result to G1 | |
1585 // Must leave proper result in O0,O1 and G1 (TIERED only) | |
1586 __ sllx(I0, 32, G1); // Shift bits into high G1 | |
1587 __ srl (I1, 0, I1); // Zero extend O1 (harmless?) | |
1588 __ or3 (I1, G1, G1); // OR 64 bits into G1 | |
1783 | 1589 #ifdef ASSERT |
1590 // mangle it so any problems will show up | |
1591 __ set(0xdeadbeef, I0); | |
1592 __ set(0xdeadbeef, I1); | |
1593 #endif | |
0 | 1594 } |
1595 #endif // TIERED | |
1596 __ set((intptr_t)os::get_polling_page(), L0); | |
1597 __ relocate(relocInfo::poll_return_type); | |
1598 __ ld_ptr(L0, 0, G0); | |
1599 __ ret(); | |
1600 __ delayed()->restore(); | |
1601 } | |
1602 | |
1603 | |
1604 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { | |
1605 __ set((intptr_t)os::get_polling_page(), tmp->as_register()); | |
1606 if (info != NULL) { | |
1607 add_debug_info_for_branch(info); | |
1608 } else { | |
1609 __ relocate(relocInfo::poll_type); | |
1610 } | |
1611 | |
1612 int offset = __ offset(); | |
1613 __ ld_ptr(tmp->as_register(), 0, G0); | |
1614 | |
1615 return offset; | |
1616 } | |
1617 | |
1618 | |
1619 void LIR_Assembler::emit_static_call_stub() { | |
1620 address call_pc = __ pc(); | |
1621 address stub = __ start_a_stub(call_stub_size); | |
1622 if (stub == NULL) { | |
1623 bailout("static call stub overflow"); | |
1624 return; | |
1625 } | |
1626 | |
1627 int start = __ offset(); | |
1628 __ relocate(static_stub_Relocation::spec(call_pc)); | |
1629 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
1630 __ set_metadata(NULL, G5); |
0 | 1631 // must be set to -1 at code generation time |
727 | 1632 AddressLiteral addrlit(-1); |
1633 __ jump_to(addrlit, G3); | |
0 | 1634 __ delayed()->nop(); |
1635 | |
1636 assert(__ offset() - start <= call_stub_size, "stub too big"); | |
1637 __ end_a_stub(); | |
1638 } | |
1639 | |
1640 | |
1641 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { | |
1642 if (opr1->is_single_fpu()) { | |
1643 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg()); | |
1644 } else if (opr1->is_double_fpu()) { | |
1645 __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg()); | |
1646 } else if (opr1->is_single_cpu()) { | |
1647 if (opr2->is_constant()) { | |
1648 switch (opr2->as_constant_ptr()->type()) { | |
1649 case T_INT: | |
1650 { jint con = opr2->as_constant_ptr()->as_jint(); | |
1651 if (Assembler::is_simm13(con)) { | |
1652 __ cmp(opr1->as_register(), con); | |
1653 } else { | |
1654 __ set(con, O7); | |
1655 __ cmp(opr1->as_register(), O7); | |
1656 } | |
1657 } | |
1658 break; | |
1659 | |
1660 case T_OBJECT: | |
1661 // there are only equal/notequal comparisions on objects | |
1662 { jobject con = opr2->as_constant_ptr()->as_jobject(); | |
1663 if (con == NULL) { | |
1664 __ cmp(opr1->as_register(), 0); | |
1665 } else { | |
1666 jobject2reg(con, O7); | |
1667 __ cmp(opr1->as_register(), O7); | |
1668 } | |
1669 } | |
1670 break; | |
1671 | |
1672 default: | |
1673 ShouldNotReachHere(); | |
1674 break; | |
1675 } | |
1676 } else { | |
1677 if (opr2->is_address()) { | |
1678 LIR_Address * addr = opr2->as_address_ptr(); | |
1679 BasicType type = addr->type(); | |
1680 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7); | |
1681 else __ ld(as_Address(addr), O7); | |
1682 __ cmp(opr1->as_register(), O7); | |
1683 } else { | |
1684 __ cmp(opr1->as_register(), opr2->as_register()); | |
1685 } | |
1686 } | |
1687 } else if (opr1->is_double_cpu()) { | |
1688 Register xlo = opr1->as_register_lo(); | |
1689 Register xhi = opr1->as_register_hi(); | |
1690 if (opr2->is_constant() && opr2->as_jlong() == 0) { | |
1691 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases"); | |
1692 #ifdef _LP64 | |
1693 __ orcc(xhi, G0, G0); | |
1694 #else | |
1695 __ orcc(xhi, xlo, G0); | |
1696 #endif | |
1697 } else if (opr2->is_register()) { | |
1698 Register ylo = opr2->as_register_lo(); | |
1699 Register yhi = opr2->as_register_hi(); | |
1700 #ifdef _LP64 | |
1701 __ cmp(xlo, ylo); | |
1702 #else | |
1703 __ subcc(xlo, ylo, xlo); | |
1704 __ subccc(xhi, yhi, xhi); | |
1705 if (condition == lir_cond_equal || condition == lir_cond_notEqual) { | |
1706 __ orcc(xhi, xlo, G0); | |
1707 } | |
1708 #endif | |
1709 } else { | |
1710 ShouldNotReachHere(); | |
1711 } | |
1712 } else if (opr1->is_address()) { | |
1713 LIR_Address * addr = opr1->as_address_ptr(); | |
1714 BasicType type = addr->type(); | |
1715 assert (opr2->is_constant(), "Checking"); | |
1716 if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7); | |
1717 else __ ld(as_Address(addr), O7); | |
1718 __ cmp(O7, opr2->as_constant_ptr()->as_jint()); | |
1719 } else { | |
1720 ShouldNotReachHere(); | |
1721 } | |
1722 } | |
1723 | |
1724 | |
1725 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){ | |
1726 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { | |
1727 bool is_unordered_less = (code == lir_ucmp_fd2i); | |
1728 if (left->is_single_fpu()) { | |
1729 __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register()); | |
1730 } else if (left->is_double_fpu()) { | |
1731 __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register()); | |
1732 } else { | |
1733 ShouldNotReachHere(); | |
1734 } | |
1735 } else if (code == lir_cmp_l2i) { | |
1369 | 1736 #ifdef _LP64 |
1737 __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register()); | |
1738 #else | |
0 | 1739 __ lcmp(left->as_register_hi(), left->as_register_lo(), |
1740 right->as_register_hi(), right->as_register_lo(), | |
1741 dst->as_register()); | |
1369 | 1742 #endif |
0 | 1743 } else { |
1744 ShouldNotReachHere(); | |
1745 } | |
1746 } | |
1747 | |
1748 | |
2089
037c727f35fb
7009231: C1: Incorrect CAS code for longs on SPARC 32bit
iveresov
parents:
2010
diff
changeset
|
1749 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) { |
0 | 1750 Assembler::Condition acond; |
1751 switch (condition) { | |
1752 case lir_cond_equal: acond = Assembler::equal; break; | |
1753 case lir_cond_notEqual: acond = Assembler::notEqual; break; | |
1754 case lir_cond_less: acond = Assembler::less; break; | |
1755 case lir_cond_lessEqual: acond = Assembler::lessEqual; break; | |
1756 case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break; | |
1757 case lir_cond_greater: acond = Assembler::greater; break; | |
1758 case lir_cond_aboveEqual: acond = Assembler::greaterEqualUnsigned; break; | |
1759 case lir_cond_belowEqual: acond = Assembler::lessEqualUnsigned; break; | |
1760 default: ShouldNotReachHere(); | |
1761 }; | |
1762 | |
1763 if (opr1->is_constant() && opr1->type() == T_INT) { | |
1764 Register dest = result->as_register(); | |
1765 // load up first part of constant before branch | |
1766 // and do the rest in the delay slot. | |
1767 if (!Assembler::is_simm13(opr1->as_jint())) { | |
1768 __ sethi(opr1->as_jint(), dest); | |
1769 } | |
1770 } else if (opr1->is_constant()) { | |
1771 const2reg(opr1, result, lir_patch_none, NULL); | |
1772 } else if (opr1->is_register()) { | |
1773 reg2reg(opr1, result); | |
1774 } else if (opr1->is_stack()) { | |
1775 stack2reg(opr1, result, result->type()); | |
1776 } else { | |
1777 ShouldNotReachHere(); | |
1778 } | |
1779 Label skip; | |
2089
037c727f35fb
7009231: C1: Incorrect CAS code for longs on SPARC 32bit
iveresov
parents:
2010
diff
changeset
|
1780 #ifdef _LP64 |
037c727f35fb
7009231: C1: Incorrect CAS code for longs on SPARC 32bit
iveresov
parents:
2010
diff
changeset
|
1781 if (type == T_INT) { |
037c727f35fb
7009231: C1: Incorrect CAS code for longs on SPARC 32bit
iveresov
parents:
2010
diff
changeset
|
1782 __ br(acond, false, Assembler::pt, skip); |
037c727f35fb
7009231: C1: Incorrect CAS code for longs on SPARC 32bit
iveresov
parents:
2010
diff
changeset
|
1783 } else |
037c727f35fb
7009231: C1: Incorrect CAS code for longs on SPARC 32bit
iveresov
parents:
2010
diff
changeset
|
1784 #endif |
037c727f35fb
7009231: C1: Incorrect CAS code for longs on SPARC 32bit
iveresov
parents:
2010
diff
changeset
|
1785 __ brx(acond, false, Assembler::pt, skip); // checks icc on 32bit and xcc on 64bit |
0 | 1786 if (opr1->is_constant() && opr1->type() == T_INT) { |
1787 Register dest = result->as_register(); | |
1788 if (Assembler::is_simm13(opr1->as_jint())) { | |
1789 __ delayed()->or3(G0, opr1->as_jint(), dest); | |
1790 } else { | |
1791 // the sethi has been done above, so just put in the low 10 bits | |
1792 __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest); | |
1793 } | |
1794 } else { | |
1795 // can't do anything useful in the delay slot | |
1796 __ delayed()->nop(); | |
1797 } | |
1798 if (opr2->is_constant()) { | |
1799 const2reg(opr2, result, lir_patch_none, NULL); | |
1800 } else if (opr2->is_register()) { | |
1801 reg2reg(opr2, result); | |
1802 } else if (opr2->is_stack()) { | |
1803 stack2reg(opr2, result, result->type()); | |
1804 } else { | |
1805 ShouldNotReachHere(); | |
1806 } | |
1807 __ bind(skip); | |
1808 } | |
1809 | |
1810 | |
1811 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) { | |
1812 assert(info == NULL, "unused on this code path"); | |
1813 assert(left->is_register(), "wrong items state"); | |
1814 assert(dest->is_register(), "wrong items state"); | |
1815 | |
1816 if (right->is_register()) { | |
1817 if (dest->is_float_kind()) { | |
1818 | |
1819 FloatRegister lreg, rreg, res; | |
1820 FloatRegisterImpl::Width w; | |
1821 if (right->is_single_fpu()) { | |
1822 w = FloatRegisterImpl::S; | |
1823 lreg = left->as_float_reg(); | |
1824 rreg = right->as_float_reg(); | |
1825 res = dest->as_float_reg(); | |
1826 } else { | |
1827 w = FloatRegisterImpl::D; | |
1828 lreg = left->as_double_reg(); | |
1829 rreg = right->as_double_reg(); | |
1830 res = dest->as_double_reg(); | |
1831 } | |
1832 | |
1833 switch (code) { | |
1834 case lir_add: __ fadd(w, lreg, rreg, res); break; | |
1835 case lir_sub: __ fsub(w, lreg, rreg, res); break; | |
1836 case lir_mul: // fall through | |
1837 case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break; | |
1838 case lir_div: // fall through | |
1839 case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break; | |
1840 default: ShouldNotReachHere(); | |
1841 } | |
1842 | |
1843 } else if (dest->is_double_cpu()) { | |
1844 #ifdef _LP64 | |
1845 Register dst_lo = dest->as_register_lo(); | |
1846 Register op1_lo = left->as_pointer_register(); | |
1847 Register op2_lo = right->as_pointer_register(); | |
1848 | |
1849 switch (code) { | |
1850 case lir_add: | |
1851 __ add(op1_lo, op2_lo, dst_lo); | |
1852 break; | |
1853 | |
1854 case lir_sub: | |
1855 __ sub(op1_lo, op2_lo, dst_lo); | |
1856 break; | |
1857 | |
1858 default: ShouldNotReachHere(); | |
1859 } | |
1860 #else | |
1861 Register op1_lo = left->as_register_lo(); | |
1862 Register op1_hi = left->as_register_hi(); | |
1863 Register op2_lo = right->as_register_lo(); | |
1864 Register op2_hi = right->as_register_hi(); | |
1865 Register dst_lo = dest->as_register_lo(); | |
1866 Register dst_hi = dest->as_register_hi(); | |
1867 | |
1868 switch (code) { | |
1869 case lir_add: | |
1870 __ addcc(op1_lo, op2_lo, dst_lo); | |
1871 __ addc (op1_hi, op2_hi, dst_hi); | |
1872 break; | |
1873 | |
1874 case lir_sub: | |
1875 __ subcc(op1_lo, op2_lo, dst_lo); | |
1876 __ subc (op1_hi, op2_hi, dst_hi); | |
1877 break; | |
1878 | |
1879 default: ShouldNotReachHere(); | |
1880 } | |
1881 #endif | |
1882 } else { | |
1883 assert (right->is_single_cpu(), "Just Checking"); | |
1884 | |
1885 Register lreg = left->as_register(); | |
1886 Register res = dest->as_register(); | |
1887 Register rreg = right->as_register(); | |
1888 switch (code) { | |
1889 case lir_add: __ add (lreg, rreg, res); break; | |
1890 case lir_sub: __ sub (lreg, rreg, res); break; | |
1891 case lir_mul: __ mult (lreg, rreg, res); break; | |
1892 default: ShouldNotReachHere(); | |
1893 } | |
1894 } | |
1895 } else { | |
1896 assert (right->is_constant(), "must be constant"); | |
1897 | |
1898 if (dest->is_single_cpu()) { | |
1899 Register lreg = left->as_register(); | |
1900 Register res = dest->as_register(); | |
1901 int simm13 = right->as_constant_ptr()->as_jint(); | |
1902 | |
1903 switch (code) { | |
1904 case lir_add: __ add (lreg, simm13, res); break; | |
1905 case lir_sub: __ sub (lreg, simm13, res); break; | |
1906 case lir_mul: __ mult (lreg, simm13, res); break; | |
1907 default: ShouldNotReachHere(); | |
1908 } | |
1909 } else { | |
1910 Register lreg = left->as_pointer_register(); | |
1911 Register res = dest->as_register_lo(); | |
1912 long con = right->as_constant_ptr()->as_jlong(); | |
1913 assert(Assembler::is_simm13(con), "must be simm13"); | |
1914 | |
1915 switch (code) { | |
1916 case lir_add: __ add (lreg, (int)con, res); break; | |
1917 case lir_sub: __ sub (lreg, (int)con, res); break; | |
1918 case lir_mul: __ mult (lreg, (int)con, res); break; | |
1919 default: ShouldNotReachHere(); | |
1920 } | |
1921 } | |
1922 } | |
1923 } | |
1924 | |
1925 | |
1926 void LIR_Assembler::fpop() { | |
1927 // do nothing | |
1928 } | |
1929 | |
1930 | |
1931 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { | |
1932 switch (code) { | |
1933 case lir_sin: | |
1934 case lir_tan: | |
1935 case lir_cos: { | |
1936 assert(thread->is_valid(), "preserve the thread object for performance reasons"); | |
1937 assert(dest->as_double_reg() == F0, "the result will be in f0/f1"); | |
1938 break; | |
1939 } | |
1940 case lir_sqrt: { | |
1941 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt"); | |
1942 FloatRegister src_reg = value->as_double_reg(); | |
1943 FloatRegister dst_reg = dest->as_double_reg(); | |
1944 __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg); | |
1945 break; | |
1946 } | |
1947 case lir_abs: { | |
1948 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs"); | |
1949 FloatRegister src_reg = value->as_double_reg(); | |
1950 FloatRegister dst_reg = dest->as_double_reg(); | |
1951 __ fabs(FloatRegisterImpl::D, src_reg, dst_reg); | |
1952 break; | |
1953 } | |
1954 default: { | |
1955 ShouldNotReachHere(); | |
1956 break; | |
1957 } | |
1958 } | |
1959 } | |
1960 | |
1961 | |
1962 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { | |
1963 if (right->is_constant()) { | |
1964 if (dest->is_single_cpu()) { | |
1965 int simm13 = right->as_constant_ptr()->as_jint(); | |
1966 switch (code) { | |
1967 case lir_logic_and: __ and3 (left->as_register(), simm13, dest->as_register()); break; | |
1968 case lir_logic_or: __ or3 (left->as_register(), simm13, dest->as_register()); break; | |
1969 case lir_logic_xor: __ xor3 (left->as_register(), simm13, dest->as_register()); break; | |
1970 default: ShouldNotReachHere(); | |
1971 } | |
1972 } else { | |
1973 long c = right->as_constant_ptr()->as_jlong(); | |
1974 assert(c == (int)c && Assembler::is_simm13(c), "out of range"); | |
1975 int simm13 = (int)c; | |
1976 switch (code) { | |
1977 case lir_logic_and: | |
1978 #ifndef _LP64 | |
1979 __ and3 (left->as_register_hi(), 0, dest->as_register_hi()); | |
1980 #endif | |
1981 __ and3 (left->as_register_lo(), simm13, dest->as_register_lo()); | |
1982 break; | |
1983 | |
1984 case lir_logic_or: | |
1985 #ifndef _LP64 | |
1986 __ or3 (left->as_register_hi(), 0, dest->as_register_hi()); | |
1987 #endif | |
1988 __ or3 (left->as_register_lo(), simm13, dest->as_register_lo()); | |
1989 break; | |
1990 | |
1991 case lir_logic_xor: | |
1992 #ifndef _LP64 | |
1993 __ xor3 (left->as_register_hi(), 0, dest->as_register_hi()); | |
1994 #endif | |
1995 __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo()); | |
1996 break; | |
1997 | |
1998 default: ShouldNotReachHere(); | |
1999 } | |
2000 } | |
2001 } else { | |
2002 assert(right->is_register(), "right should be in register"); | |
2003 | |
2004 if (dest->is_single_cpu()) { | |
2005 switch (code) { | |
2006 case lir_logic_and: __ and3 (left->as_register(), right->as_register(), dest->as_register()); break; | |
2007 case lir_logic_or: __ or3 (left->as_register(), right->as_register(), dest->as_register()); break; | |
2008 case lir_logic_xor: __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break; | |
2009 default: ShouldNotReachHere(); | |
2010 } | |
2011 } else { | |
2012 #ifdef _LP64 | |
2013 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : | |
2014 left->as_register_lo(); | |
2015 Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() : | |
2016 right->as_register_lo(); | |
2017 | |
2018 switch (code) { | |
2019 case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break; | |
2020 case lir_logic_or: __ or3 (l, r, dest->as_register_lo()); break; | |
2021 case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break; | |
2022 default: ShouldNotReachHere(); | |
2023 } | |
2024 #else | |
2025 switch (code) { | |
2026 case lir_logic_and: | |
2027 __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); | |
2028 __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); | |
2029 break; | |
2030 | |
2031 case lir_logic_or: | |
2032 __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); | |
2033 __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); | |
2034 break; | |
2035 | |
2036 case lir_logic_xor: | |
2037 __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi()); | |
2038 __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo()); | |
2039 break; | |
2040 | |
2041 default: ShouldNotReachHere(); | |
2042 } | |
2043 #endif | |
2044 } | |
2045 } | |
2046 } | |
2047 | |
2048 | |
2049 int LIR_Assembler::shift_amount(BasicType t) { | |
29
d5fc211aea19
6633953: type2aelembytes{T_ADDRESS} should be 8 bytes in 64 bit VM
kvn
parents:
0
diff
changeset
|
2050 int elem_size = type2aelembytes(t); |
0 | 2051 switch (elem_size) { |
2052 case 1 : return 0; | |
2053 case 2 : return 1; | |
2054 case 4 : return 2; | |
2055 case 8 : return 3; | |
2056 } | |
2057 ShouldNotReachHere(); | |
2058 return -1; | |
2059 } | |
2060 | |
2061 | |
1378
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
// Emit code for an explicit 'athrow'. The exception oop and issuing pc are
// expected in fixed registers (Oexception / Oissuing_pc, checked below), and
// control transfers to the shared Runtime1 exception handler entry.
void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Oexception, "should match");
  assert(exceptionPC->as_register() == Oissuing_pc, "should match");

  // The exception oop must be visible to the GC maps at this call site.
  info->add_register_oop(exceptionOop);

  // reuse the debug info from the safepoint poll for the throw op itself
  // Capture the current code position *before* materializing it, so the
  // internal_word relocation refers to this exact pc.
  address pc_for_athrow  = __ pc();
  int pc_for_athrow_offset = __ offset();
  RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  __ set(pc_for_athrow, Oissuing_pc, rspec);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();  // SPARC branch delay slot
}
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
2078 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
2079 |
9f5b60a14736
6939930: exception unwind changes in 6919934 hurts compilation speed
never
parents:
1369
diff
changeset
|
// Emit the unwind path for an exception leaving this method: jump to the
// method's shared unwind handler (emitted elsewhere), with the exception oop
// already in its expected fixed register.
void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Oexception, "should match");

  __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
  __ delayed()->nop();  // SPARC branch delay slot
}
2086 | |
// Emit code for System.arraycopy intrinsics. Strategy:
//   - no static type info: call the generic arraycopy stub (or the C runtime
//     fallback) and retry via the slow-path stub on partial/failed copies;
//   - known element type: emit only the checks the LIR op says are needed
//     (null/range/position/type), then tail-call the best matching
//     StubRoutines copy stub for that element type/alignment/overlap.
// Any failed check branches to the ArrayCopyStub slow path ('stub->entry()').
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp2 = O7;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

#ifdef _LP64
  // higher 32bits must be null: the C2 arraycopy stubs expect sign-extended
  // 32-bit positions/length in 64-bit registers.
  __ sra(dst_pos, 0, dst_pos);
  __ sra(src_pos, 0, src_pos);
  __ sra(length, 0, length);
#endif

  // set up the arraycopy stub information
  ArrayCopyStub* stub = op->stub();

  // always do stub if no type information is available. it's ok if
  // the known type isn't loaded since the code sanity checks
  // in debug mode and the type isn't required when we know the exact type
  // also check that the type is an array type.
  if (op->expected_type() == NULL) {
    // Marshal arguments into the outgoing registers for the stub/runtime call.
    __ mov(src,     O0);
    __ mov(src_pos, O1);
    __ mov(dst,     O2);
    __ mov(dst_pos, O3);
    __ mov(length,  O4);
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated
      __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
        __ inc_counter(counter, G1, G3);
      }
#endif
      __ call_VM_leaf(tmp, copyfunc_addr);
    }

    if (copyfunc_addr != NULL) {
      // Per the generic_arraycopy stub convention, O0 is 0 on success and
      // negative on partial copy, with ~O0 = number of elements copied.
      // Adjust length/positions past the copied prefix, then retry the
      // remainder in the slow path if O0 was negative.
      __ xor3(O0, -1, tmp);          // tmp = ~O0 = elements already copied
      __ sub(length, tmp, length);
      __ add(src_pos, tmp, src_pos);
      __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
      __ delayed()->add(dst_pos, tmp, dst_pos);  // executed either way (delay slot)
    } else {
      __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
      __ delayed()->nop();
    }
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");

  // make sure src and dst are non-null and load array length
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ tst(src);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ tst(dst);
    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    // test src_pos register
    __ cmp_zero_and_br(Assembler::less, src_pos, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    // test dst_pos register
    __ cmp_zero_and_br(Assembler::less, dst_pos, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    // make sure length isn't negative
    __ cmp_zero_and_br(Assembler::less, length, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    // Slow path if src_pos + length exceeds src.length (carrySet catches
    // both overflow and the unsigned "greater" case).
    __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, src_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  if (flags & LIR_OpArrayCopy::dst_range_check) {
    // Same bound check for the destination array.
    __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
    __ add(length, dst_pos, tmp);
    __ cmp(tmp2, tmp);
    __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
    __ delayed()->nop();
  }

  int shift = shift_amount(basic_type);  // log2 of the element size

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedKlassPointers) {
        // We don't need decode because we just need to compare
        __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      } else {
        __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
      }
      __ delayed()->nop();
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      Label cont, slow;
      assert_different_registers(tmp, tmp2, G3, G1);

      __ load_klass(src, G3);
      __ load_klass(dst, G1);

      __ check_klass_subtype_fast_path(G3, G1, tmp, tmp2, &cont, copyfunc_addr == NULL ? stub->entry() : &slow, NULL);

      // Fast path was inconclusive: full subtype check (result in G3).
      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
      __ delayed()->nop();

      __ cmp(G3, 0);
      if (copyfunc_addr != NULL) { // use stub if available
        // src is not a sub class of dst so we have to do a
        // per-element check.
        __ br(Assembler::notEqual, false, Assembler::pt, cont);
        __ delayed()->nop();

        __ bind(slow);

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // Check that at least both of them object arrays.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(src, tmp);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(dst, tmp);
          }

          int lh_offset = in_bytes(Klass::layout_helper_offset());

          __ lduw(tmp, lh_offset, tmp2);

          // Compare against the canonical layout-helper value for T_OBJECT
          // arrays; mismatch means "not an object array" -> slow path.
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ set(objArray_lh, tmp);
          __ cmp(tmp, tmp2);
          __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
          __ delayed()->nop();
        }

        // Outgoing arguments for the checkcast_arraycopy stub.
        Register src_ptr = O0;
        Register dst_ptr = O1;
        Register len     = O2;
        Register chk_off = O3;
        Register super_k = O4;

        // src_ptr = &src[src_pos] (scaled by element size).
        __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
        } else {
          __ sll(src_pos, shift, tmp);
          __ add(src_ptr, tmp, src_ptr);
        }

        // dst_ptr = &dst[dst_pos].
        __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
        if (shift == 0) {
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sll(dst_pos, shift, tmp);
          __ add(dst_ptr, tmp, dst_ptr);
        }
        __ mov(length, len);
        __ load_klass(dst, tmp);

        // Element klass of the destination plus its super-check offset are
        // extra arguments for the per-element checkcast stub.
        int ek_offset = in_bytes(objArrayKlass::element_klass_offset());
        __ ld_ptr(tmp, ek_offset, super_k);

        int sco_offset = in_bytes(Klass::super_check_offset_offset());
        __ lduw(super_k, sco_offset, chk_off);

        __ call_VM_leaf(tmp, copyfunc_addr);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ br_notnull_short(O0, Assembler::pn, failed);
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_cnt, G1, G3);
          __ bind(failed);
        }
#endif

        // O0 == 0 means everything copied; otherwise ~O0 elements were done.
        __ br_null(O0, false, Assembler::pt, *stub->continuation());
        __ delayed()->xor3(O0, -1, tmp);  // tmp = ~O0 (harmless when O0 == 0)

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ inc_counter((address)&Runtime1::_arraycopy_checkcast_attempt_cnt, G1, G3);
        }
#endif

        // Skip the already-copied prefix and retry the rest in the slow path.
        __ sub(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ br(Assembler::always, false, Assembler::pt, *stub->entry());
        __ delayed()->add(dst_pos, tmp, dst_pos);

        __ bind(cont);
      } else {
        __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
        __ delayed()->nop();
        __ bind(cont);
      }
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    metadata2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedKlassPointers) {
      // tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
      // NOTE(review): tmp holds a Klass* here (metadata2reg above), yet it is
      // compressed with encode_heap_oop — verify this matches the klass
      // encoding in this revision's compressed-klass scheme.
      __ encode_heap_oop(tmp);
      // load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ br(Assembler::notEqual, false, Assembler::pn, halt);
        // load the raw value of the src klass.
        __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_br_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ br(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);  // src == dst is also acceptable
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    } else {
      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::notEqual, false, Assembler::pn, halt);
        __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
        __ cmp_and_brx_short(tmp, tmp2, Assembler::equal, Assembler::pn, known_ok);
      } else {
        __ cmp(tmp, tmp2);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    __ inc_counter(counter, G1, G3);
  }
#endif

  // All checks passed: compute raw element addresses and call the selected
  // type/alignment/overlap-specific copy stub.
  Register src_ptr = O0;
  Register dst_ptr = O1;
  Register len     = O2;

  __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
  } else {
    __ sll(src_pos, shift, tmp);
    __ add(src_ptr, tmp, src_ptr);
  }

  __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
  if (shift == 0) {
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sll(dst_pos, shift, tmp);
    __ add(dst_ptr, tmp, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // arraycopy stubs takes a length in number of elements, so don't scale it.
  __ mov(length, len);
  __ call_VM_leaf(tmp, entry);

  __ bind(*stub->continuation());
}
2417 | |
2418 | |
2419 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { | |
2420 if (dest->is_single_cpu()) { | |
2421 #ifdef _LP64 | |
2422 if (left->type() == T_OBJECT) { | |
2423 switch (code) { | |
2424 case lir_shl: __ sllx (left->as_register(), count->as_register(), dest->as_register()); break; | |
2425 case lir_shr: __ srax (left->as_register(), count->as_register(), dest->as_register()); break; | |
2426 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; | |
2427 default: ShouldNotReachHere(); | |
2428 } | |
2429 } else | |
2430 #endif | |
2431 switch (code) { | |
2432 case lir_shl: __ sll (left->as_register(), count->as_register(), dest->as_register()); break; | |
2433 case lir_shr: __ sra (left->as_register(), count->as_register(), dest->as_register()); break; | |
2434 case lir_ushr: __ srl (left->as_register(), count->as_register(), dest->as_register()); break; | |
2435 default: ShouldNotReachHere(); | |
2436 } | |
2437 } else { | |
2438 #ifdef _LP64 | |
2439 switch (code) { | |
2440 case lir_shl: __ sllx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; | |
2441 case lir_shr: __ srax (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; | |
2442 case lir_ushr: __ srlx (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break; | |
2443 default: ShouldNotReachHere(); | |
2444 } | |
2445 #else | |
2446 switch (code) { | |
2447 case lir_shl: __ lshl (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; | |
2448 case lir_shr: __ lshr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; | |
2449 case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break; | |
2450 default: ShouldNotReachHere(); | |
2451 } | |
2452 #endif | |
2453 } | |
2454 } | |
2455 | |
2456 | |
2457 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { | |
2458 #ifdef _LP64 | |
2459 if (left->type() == T_OBJECT) { | |
2460 count = count & 63; // shouldn't shift by more than sizeof(intptr_t) | |
2461 Register l = left->as_register(); | |
2462 Register d = dest->as_register_lo(); | |
2463 switch (code) { | |
2464 case lir_shl: __ sllx (l, count, d); break; | |
2465 case lir_shr: __ srax (l, count, d); break; | |
2466 case lir_ushr: __ srlx (l, count, d); break; | |
2467 default: ShouldNotReachHere(); | |
2468 } | |
2469 return; | |
2470 } | |
2471 #endif | |
2472 | |
2473 if (dest->is_single_cpu()) { | |
2474 count = count & 0x1F; // Java spec | |
2475 switch (code) { | |
2476 case lir_shl: __ sll (left->as_register(), count, dest->as_register()); break; | |
2477 case lir_shr: __ sra (left->as_register(), count, dest->as_register()); break; | |
2478 case lir_ushr: __ srl (left->as_register(), count, dest->as_register()); break; | |
2479 default: ShouldNotReachHere(); | |
2480 } | |
2481 } else if (dest->is_double_cpu()) { | |
2482 count = count & 63; // Java spec | |
2483 switch (code) { | |
2484 case lir_shl: __ sllx (left->as_pointer_register(), count, dest->as_pointer_register()); break; | |
2485 case lir_shr: __ srax (left->as_pointer_register(), count, dest->as_pointer_register()); break; | |
2486 case lir_ushr: __ srlx (left->as_pointer_register(), count, dest->as_pointer_register()); break; | |
2487 default: ShouldNotReachHere(); | |
2488 } | |
2489 } else { | |
2490 ShouldNotReachHere(); | |
2491 } | |
2492 } | |
2493 | |
2494 | |
// Emit instance allocation. Registers are fixed by the LIR operand
// constraints (asserted below). If an initialization check is required, a
// not-fully-initialized klass branches to the slow-path stub.
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->obj()->as_register() == O0 &&
         op->klass()->as_register() == G5, "must be");
  if (op->init_check()) {
    // Load the klass init state; this load doubles as the implicit null
    // check for the klass, hence the debug info recorded right after it.
    __ ldub(op->klass()->as_register(),
          in_bytes(InstanceKlass::init_state_offset()),
          op->tmp1()->as_register());
    add_debug_info_for_null_check_here(op->stub()->info());
    __ cmp(op->tmp1()->as_register(), InstanceKlass::fully_initialized);
    __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
    __ delayed()->nop();
  }
  // Fast-path TLAB/eden allocation; falls back to the stub on failure.
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}
2521 | |
2522 | |
// Emit array allocation. Registers are fixed by the LIR operand constraints
// (asserted below). Either takes the fast inline-allocation path or branches
// unconditionally to the slow-path stub when fast allocation is disabled.
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  assert(op->tmp1()->as_register() == G1 &&
         op->tmp2()->as_register() == G3 &&
         op->tmp3()->as_register() == G4 &&
         op->tmp4()->as_register() == O1 &&
         op->klass()->as_register() == G5, "must be");

  // On 64-bit, the length arrives as a 32-bit int; sign-extend it so the
  // size computation below sees a proper 64-bit value.
  LP64_ONLY( __ signx(op->len()->as_register()); )
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    // Fast allocation disabled for this array kind: go straight to the stub.
    __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
    __ delayed()->nop();
  } else {
    // Fast-path allocation; falls back to the stub on failure (including
    // negative/huge length).
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}
2549 | |
2550 | |
// Record 'recv' in the receiver-type rows of a ReceiverTypeData profile cell.
// First pass: bump the count of a matching row. Second pass: claim the first
// empty row for this receiver. Either way control jumps to *update_done;
// falling off the end means all rows are full and the receiver goes unrecorded.
// 'mdo_offset_bias' compensates for slot offsets too large for simm13.
void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                          mdo_offset_bias);
    __ ld_ptr(receiver_addr, tmp1);
    __ verify_oop(tmp1);
    __ cmp_and_brx_short(recv, tmp1, Assembler::notEqual, Assembler::pt, next_test);
    // Matched: increment this row's counter and finish.
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(data_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, data_addr);
    __ ba(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                      mdo_offset_bias);
    __ ld_ptr(recv_addr, tmp1);
    __ br_notnull_short(tmp1, Assembler::pt, next_test);
    // Claim the empty row: store the receiver and seed its counter.
    __ st_ptr(recv, recv_addr);
    __ set(DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
              mdo_offset_bias);
    __ ba(*update_done);
    __ delayed()->nop();
    __ bind(next_test);
  }
}
2589 | |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2590 |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2591 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2592 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { |
2007
5ddfcf4b079e
7003554: (tiered) assert(is_null_object() || handle() != NULL) failed: cannot embed null pointer
iveresov
parents:
2002
diff
changeset
|
2593 md = method->method_data_or_null(); |
5ddfcf4b079e
7003554: (tiered) assert(is_null_object() || handle() != NULL) failed: cannot embed null pointer
iveresov
parents:
2002
diff
changeset
|
2594 assert(md != NULL, "Sanity"); |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2595 data = md->bci_to_data(bci); |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2596 assert(data != NULL, "need data for checkcast"); |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2597 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2598 if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) { |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2599 // The offset is large so bias the mdo by the base of the slot so |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2600 // that the ld can use simm13s to reference the slots of the data |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2601 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset()); |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2602 } |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2603 } |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2604 |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2605 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { |
1783 | 2606 // we always need a stub for the failure case. |
2607 CodeStub* stub = op->stub(); | |
2608 Register obj = op->object()->as_register(); | |
2609 Register k_RInfo = op->tmp1()->as_register(); | |
2610 Register klass_RInfo = op->tmp2()->as_register(); | |
2611 Register dst = op->result_opr()->as_register(); | |
2612 Register Rtmp1 = op->tmp3()->as_register(); | |
2613 ciKlass* k = op->klass(); | |
2614 | |
2615 | |
2616 if (obj == k_RInfo) { | |
2617 k_RInfo = klass_RInfo; | |
2618 klass_RInfo = obj; | |
2619 } | |
2620 | |
2621 ciMethodData* md; | |
2622 ciProfileData* data; | |
2623 int mdo_offset_bias = 0; | |
2624 if (op->should_profile()) { | |
2625 ciMethod* method = op->profiled_method(); | |
2626 assert(method != NULL, "Should have method"); | |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2627 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2628 |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2629 Label not_null; |
3839 | 2630 __ br_notnull_short(obj, Assembler::pn, not_null); |
1783 | 2631 Register mdo = k_RInfo; |
2632 Register data_val = Rtmp1; | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
2633 metadata2reg(md->constant_encoding(), mdo); |
1783 | 2634 if (mdo_offset_bias > 0) { |
2635 __ set(mdo_offset_bias, data_val); | |
2636 __ add(mdo, data_val, mdo); | |
2637 } | |
2638 Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias); | |
2639 __ ldub(flags_addr, data_val); | |
2640 __ or3(data_val, BitData::null_seen_byte_constant(), data_val); | |
2641 __ stb(data_val, flags_addr); | |
3839 | 2642 __ ba(*obj_is_null); |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2643 __ delayed()->nop(); |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2644 __ bind(not_null); |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2645 } else { |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2646 __ br_null(obj, false, Assembler::pn, *obj_is_null); |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2647 __ delayed()->nop(); |
1783 | 2648 } |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2649 |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2650 Label profile_cast_failure, profile_cast_success; |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2651 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure; |
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2652 Label *success_target = op->should_profile() ? &profile_cast_success : success; |
1783 | 2653 |
2654 // patching may screw with our temporaries on sparc, | |
2655 // so let's do it before loading the class | |
2656 if (k->is_loaded()) { | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
2657 metadata2reg(k->constant_encoding(), k_RInfo); |
1783 | 2658 } else { |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
2659 klass2reg_with_patching(k_RInfo, op->info_for_patch()); |
1783 | 2660 } |
2661 assert(obj != k_RInfo, "must be different"); | |
2662 | |
2663 // get object class | |
2664 // not a safepoint as obj null check happens earlier | |
2002 | 2665 __ load_klass(obj, klass_RInfo); |
1783 | 2666 if (op->fast_check()) { |
2667 assert_different_registers(klass_RInfo, k_RInfo); | |
2668 __ cmp(k_RInfo, klass_RInfo); | |
2669 __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target); | |
2670 __ delayed()->nop(); | |
2671 } else { | |
2672 bool need_slow_path = true; | |
2673 if (k->is_loaded()) { | |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4114
diff
changeset
|
2674 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) |
1783 | 2675 need_slow_path = false; |
2676 // perform the fast part of the checking logic | |
2677 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg, | |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2678 (need_slow_path ? success_target : NULL), |
1783 | 2679 failure_target, NULL, |
2680 RegisterOrConstant(k->super_check_offset())); | |
2681 } else { | |
2682 // perform the fast part of the checking logic | |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2683 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, |
1783 | 2684 failure_target, NULL); |
2685 } | |
2686 if (need_slow_path) { | |
2687 // call out-of-line instance of __ check_klass_subtype_slow_path(...): | |
2688 assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup"); | |
2689 __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); | |
2690 __ delayed()->nop(); | |
2691 __ cmp(G3, 0); | |
2692 __ br(Assembler::equal, false, Assembler::pn, *failure_target); | |
2693 __ delayed()->nop(); | |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2694 // Fall through to success case |
1783 | 2695 } |
2696 } | |
2697 | |
2698 if (op->should_profile()) { | |
2699 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; | |
2700 assert_different_registers(obj, mdo, recv, tmp1); | |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2701 __ bind(profile_cast_success); |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
2702 metadata2reg(md->constant_encoding(), mdo); |
1783 | 2703 if (mdo_offset_bias > 0) { |
2704 __ set(mdo_offset_bias, tmp1); | |
2705 __ add(mdo, tmp1, mdo); | |
2706 } | |
2002 | 2707 __ load_klass(obj, recv); |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2708 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success); |
1783 | 2709 // Jump over the failure case |
3839 | 2710 __ ba(*success); |
1783 | 2711 __ delayed()->nop(); |
2712 // Cast failure case | |
2713 __ bind(profile_cast_failure); | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
6266
diff
changeset
|
2714 metadata2reg(md->constant_encoding(), mdo); |
1783 | 2715 if (mdo_offset_bias > 0) { |
2716 __ set(mdo_offset_bias, tmp1); | |
2717 __ add(mdo, tmp1, mdo); | |
2718 } | |
2719 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); | |
2720 __ ld_ptr(data_addr, tmp1); | |
2721 __ sub(tmp1, DataLayout::counter_increment, tmp1); | |
2722 __ st_ptr(tmp1, data_addr); | |
3839 | 2723 __ ba(*failure); |
1783 | 2724 __ delayed()->nop(); |
2725 } | |
3839 | 2726 __ ba(*success); |
1791
3a294e483abc
6919069: client compiler needs to capture more profile information for tiered work
iveresov
parents:
1783
diff
changeset
|
2727 __ delayed()->nop(); |
1783 | 2728 } |
2729 | |
// Emit code for a LIR type-check operation. Three LIR codes are handled:
//  - lir_store_check: verify that `value` may be stored into `array`
//    (element-type subtype check), branching to the op's CodeStub on failure;
//  - lir_checkcast:   delegated to emit_typecheck_helper(); the object is
//    moved to the result register on success;
//  - lir_instanceof:  delegated to emit_typecheck_helper(); the result
//    register is set to 1 on success, 0 on failure.
// When op->should_profile() is set, the store-check path also updates the
// MethodData* (null_seen flag, receiver-type rows, and the failure counter).
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();

    __ verify_oop(value);
    CodeStub* stub = op->stub();
    // check if it needs to be profiled
    ciMethodData* md;
    ciProfileData* data;
    int mdo_offset_bias = 0;
    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    // When profiling, success/failure first fall into the profiling blocks
    // below; otherwise they go straight to `done` / the slow-path stub.
    Label profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      Label not_null;
      __ br_notnull_short(value, Assembler::pn, not_null);
      // Storing a null is always legal; record the null_seen flag in the MDO
      // and skip the subtype check entirely.
      Register mdo      = k_RInfo;
      Register data_val = Rtmp1;
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        // Bias the mdo pointer so slot displacements fit in simm13.
        __ set(mdo_offset_bias, data_val);
        __ add(mdo, data_val, mdo);
      }
      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
      __ ldub(flags_addr, data_val);
      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
      __ stb(data_val, flags_addr);
      __ ba_short(done);
      __ bind(not_null);
    } else {
      // Not profiling: a null store trivially passes the check.
      __ br_null_short(value, Assembler::pn, done);
    }
    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(array, k_RInfo);
    __ load_klass(value, klass_RInfo);

    // get instance klass
    __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset()), k_RInfo);
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
    __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
    __ delayed()->nop();
    // The stub returns its result in G3: zero means subtype-check success.
    __ cmp(G3, 0);
    __ br(Assembler::equal, false, Assembler::pn, *failure_target);
    __ delayed()->nop();
    // fall through to the success case

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      __ load_klass(value, recv);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ ba_short(done);
      // Cast failure case: undo the count increment done optimistically
      // elsewhere by decrementing the counter, then go to the slow stub.
      __ bind(profile_cast_failure);
      metadata2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld_ptr(data_addr, tmp1);
      __ sub(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, data_addr);
      __ ba(*stub->entry());
      __ delayed()->nop();
    }
    __ bind(done);
  } else if (code == lir_checkcast) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success;
    // For checkcast, a null object counts as success (third argument).
    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
    __ bind(success);
    __ mov(obj, dst);
  } else if (code == lir_instanceof) {
    Register obj = op->object()->as_register();
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    // For instanceof, a null object counts as failure (third argument).
    emit_typecheck_helper(op, &success, &failure, &failure);
    __ bind(failure);
    __ set(0, dst);
    __ ba_short(done);
    __ bind(success);
    __ set(1, dst);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }

}
2841 | |
2842 | |
// Emit an atomic compare-and-swap for lir_cas_long, lir_cas_int or
// lir_cas_obj. The expected and new values are copied into the temps t1/t2
// because casx/cas overwrite the "new value" register with the memory
// contents; afterwards the condition codes are set so that "equal" means
// the swap succeeded (callers branch on that).
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register cmp_value_hi = op->cmp_value()->as_register_hi();
    Register new_value_lo = op->new_value()->as_register_lo();
    Register new_value_hi = op->new_value()->as_register_hi();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
#ifdef _LP64
    // 64-bit: the long already lives in a single (lo) register.
    __ mov(cmp_value_lo, t1);
    __ mov(new_value_lo, t2);
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    __ cmp(t1, t2);
#else
    // move high and low halves of long values into single registers
    __ sllx(cmp_value_hi, 32, t1);         // shift high half into temp reg
    __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
    __ or3(t1, cmp_value_lo, t1);          // t1 holds 64-bit compare value
    __ sllx(new_value_hi, 32, t2);
    __ srl(new_value_lo, 0, new_value_lo);
    __ or3(t2, new_value_lo, t2);          // t2 holds 64-bit value to swap
    // perform the compare and swap operation
    __ casx(addr, t1, t2);
    // generate condition code - if the swap succeeded, t2 ("new value" reg) was
    // overwritten with the original value in "addr" and will be equal to t1.
    // Produce icc flag for 32bit: fold the 64-bit difference into a 32-bit
    // orcc so a plain (non-xcc) branch can test the result.
    __ sub(t1, t2, t2);
    __ srlx(t2, 32, t1);
    __ orcc(t2, t1, G0);
#endif
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register addr = op->addr()->as_pointer_register();
    Register cmp_value = op->cmp_value()->as_register();
    Register new_value = op->new_value()->as_register();
    Register t1 = op->tmp1()->as_register();
    Register t2 = op->tmp2()->as_register();
    __ mov(cmp_value, t1);
    __ mov(new_value, t2);
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        // Compressed oops: compare/swap the 32-bit narrow form.
        __ encode_heap_oop(t1);
        __ encode_heap_oop(t2);
        __ cas(addr, t1, t2);
      } else {
        __ cas_ptr(addr, t1, t2);
      }
    } else {
      __ cas(addr, t1, t2);
    }
    __ cmp(t1, t2);
  } else {
    Unimplemented();
  }
}
2902 | |
// FPU precision-mode switch required on some platforms (e.g. x87's 24-bit
// rounding); presumably never emitted for SPARC, so it traps if reached.
void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}
2906 | |
2907 | |
// Counterpart of set_24bit_FPU(); likewise not needed on SPARC and
// guarded with Unimplemented().
void LIR_Assembler::reset_FPU() {
  Unimplemented();
}
2911 | |
2912 | |
// Emit a breakpoint trap instruction at the current code position.
void LIR_Assembler::breakpoint() {
  __ breakpoint_trap();
}
2916 | |
2917 | |
// Stack push of an operand; not supported by this backend — traps if reached.
void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}
2921 | |
2922 | |
// Stack pop of an operand; not supported by this backend — traps if reached.
void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}
2926 | |
2927 | |
2928 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { | |
2929 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); | |
2930 Register dst = dst_opr->as_register(); | |
2931 Register reg = mon_addr.base(); | |
2932 int offset = mon_addr.disp(); | |
2933 // compute pointer to BasicLock | |
2934 if (mon_addr.is_simm13()) { | |
2935 __ add(reg, offset, dst); | |
2936 } else { | |
2937 __ set(offset, dst); | |
2938 __ add(dst, reg, dst); | |
2939 } | |
2940 } | |
2941 | |
2942 | |
// Emit code for lir_lock / lir_unlock. With UseFastLocking the inline
// fast path (lock_object / unlock_object) is emitted and the operation's
// CodeStub serves as the slow path; otherwise control branches
// unconditionally to the stub. Execution resumes at the stub's
// continuation label, bound at the end.
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();    // object to lock/unlock
  Register hdr = op->hdr_opr()->as_register();    // scratch for the header word
  Register lock = op->lock_opr()->as_register();  // on-stack BasicLock

  // obj may not be an oop
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // add debug info for NullPointerException only if one is possible
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // always do slow locking
      // note: the slow locking code could be inlined here, however if we use
      //       slow locking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow locking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  } else {
    assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // always do slow unlocking
      // note: the slow unlocking code could be inlined here, however if we use
      //       slow unlocking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow unlocking code is the same in either case which simplifies
      //       debugging
      __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
      __ delayed()->nop();
    }
  }
  // The slow-path stub returns here.
  __ bind(*op->stub()->continuation());
}
2986 | |
2987 | |
// Emit profiling code for a call site: bump the call counter in the
// MethodData, and for invokevirtual/invokeinterface additionally record the
// receiver type. When the receiver type is statically known
// (C1OptimizeVirtualCallProfiling) the MDO row is chosen at compile time and
// only the increment is emitted; otherwise type_profile_helper() emits the
// dynamic row search.
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm13s to reference the slots of the data
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ set(mdo_offset_bias, O7);
    __ add(mdo, O7, mdo);
  }

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // required for optimized MH invokes
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          // Row for this type already exists: just bump its count.
          Address data_addr(mdo, md->byte_offset_of_slot(data,
                                                         VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
                            mdo_offset_bias);
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ st_ptr(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
                            mdo_offset_bias);
          __ ld_ptr(data_addr, tmp1);
          __ add(tmp1, DataLayout::counter_increment, tmp1);
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }
      // NOTE(review): if all rows are occupied by other types, nothing is
      // updated here — presumably intentional; confirm against other backends.
    } else {
      // Receiver type not statically known: emit the dynamic row search.
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld_ptr(counter_addr, tmp1);
      __ add(tmp1, DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, counter_addr);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ld_ptr(counter_addr, tmp1);
    __ add(tmp1, DataLayout::counter_increment, tmp1);
    __ st_ptr(tmp1, counter_addr);
  }
}
3092 | |
// Pad the instruction stream out to OptoLoopAlignment so that the
// target of a backward branch (typically a loop header) starts on an
// aligned boundary.
void LIR_Assembler::align_backward_branch_target() {
  __ align(OptoLoopAlignment);
}
3096 | |
3097 | |
3098 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { | |
3099 // make sure we are expecting a delay | |
3100 // this has the side effect of clearing the delay state | |
3101 // so we can use _masm instead of _masm->delayed() to do the | |
3102 // code generation. | |
3103 __ delayed(); | |
3104 | |
3105 // make sure we only emit one instruction | |
3106 int offset = code_offset(); | |
3107 op->delay_op()->emit_code(this); | |
3108 #ifdef ASSERT | |
3109 if (code_offset() - offset != NativeInstruction::nop_instruction_size) { | |
3110 op->delay_op()->print(); | |
3111 } | |
3112 assert(code_offset() - offset == NativeInstruction::nop_instruction_size, | |
3113 "only one instruction can go in a delay slot"); | |
3114 #endif | |
3115 | |
3116 // we may also be emitting the call info for the instruction | |
3117 // which we are the delay slot of. | |
1564 | 3118 CodeEmitInfo* call_info = op->call_info(); |
0 | 3119 if (call_info) { |
3120 add_call_info(code_offset(), call_info); | |
3121 } | |
3122 | |
3123 if (VerifyStackAtCalls) { | |
3124 _masm->sub(FP, SP, O7); | |
3125 _masm->cmp(O7, initial_frame_size_in_bytes()); | |
3126 _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 ); | |
3127 } | |
3128 } | |
3129 | |
3130 | |
3131 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { | |
3132 assert(left->is_register(), "can only handle registers"); | |
3133 | |
3134 if (left->is_single_cpu()) { | |
3135 __ neg(left->as_register(), dest->as_register()); | |
3136 } else if (left->is_single_fpu()) { | |
3137 __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg()); | |
3138 } else if (left->is_double_fpu()) { | |
3139 __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg()); | |
3140 } else { | |
3141 assert (left->is_double_cpu(), "Must be a long"); | |
3142 Register Rlow = left->as_register_lo(); | |
3143 Register Rhi = left->as_register_hi(); | |
3144 #ifdef _LP64 | |
3145 __ sub(G0, Rlow, dest->as_register_lo()); | |
3146 #else | |
3147 __ subcc(G0, Rlow, dest->as_register_lo()); | |
3148 __ subc (G0, Rhi, dest->as_register_hi()); | |
3149 #endif | |
3150 } | |
3151 } | |
3152 | |
3153 | |
// FPU-stack exchange (x87-style register-stack concept); presumably
// never reached on SPARC, which has a flat FP register file.
void LIR_Assembler::fxch(int i) {
  Unimplemented();
}
3157 | |
// FPU-stack load (x87-style register-stack concept); presumably
// never reached on SPARC, which has a flat FP register file.
void LIR_Assembler::fld(int i) {
  Unimplemented();
}
3161 | |
// FPU-stack free (x87-style register-stack concept); presumably
// never reached on SPARC, which has a flat FP register file.
void LIR_Assembler::ffree(int i) {
  Unimplemented();
}
3165 | |
3166 void LIR_Assembler::rt_call(LIR_Opr result, address dest, | |
3167 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { | |
3168 | |
3169 // if tmp is invalid, then the function being called doesn't destroy the thread | |
3170 if (tmp->is_valid()) { | |
3171 __ save_thread(tmp->as_register()); | |
3172 } | |
3173 __ call(dest, relocInfo::runtime_call_type); | |
3174 __ delayed()->nop(); | |
3175 if (info != NULL) { | |
3176 add_call_info_here(info); | |
3177 } | |
3178 if (tmp->is_valid()) { | |
3179 __ restore_thread(tmp->as_register()); | |
3180 } | |
3181 | |
3182 #ifdef ASSERT | |
3183 __ verify_thread(); | |
3184 #endif // ASSERT | |
3185 } | |
3186 | |
3187 | |
// Emit a volatile move. On 32-bit SPARC a volatile long must be
// accessed atomically, which an ordinary two-word move does not
// guarantee, so the long case goes through a 64-bit ldx/stx (V9) or
// ldd/std (V8) using the G4/G5 scratch pair. All other types are
// handled by a normal move_op. Not used on _LP64 builds.
void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
#ifdef _LP64
  ShouldNotReachHere();
#endif

  NEEDS_CLEANUP;
  if (type == T_LONG) {
    // Exactly one of src/dest is a memory address.
    LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();

    // (extended to allow indexed as well as constant displaced for JSR-166)
    Register idx = noreg; // contains either constant offset or index

    int disp = mem_addr->disp();
    if (mem_addr->index() == LIR_OprFact::illegalOpr) {
      if (!Assembler::is_simm13(disp)) {
        // Displacement won't fit in a 13-bit immediate: materialize
        // it in O7 and use register+register addressing instead.
        idx = O7;
        __ set(disp, idx);
      }
    } else {
      assert(disp == 0, "not both indexed and disp");
      idx = mem_addr->index()->as_register();
    }

    // Offset of the instruction that can fault on a NULL base, for
    // the implicit null-check debug info below.
    int null_check_offset = -1;

    Register base = mem_addr->base()->as_register();
    if (src->is_register() && dest->is_address()) {
      // Store: G4 is high half, G5 is low half.
      if (VM_Version::v9_instructions_work()) {
        // clear the top bits of G5, and scale up G4
        __ srl (src->as_register_lo(), 0, G5);
        __ sllx(src->as_register_hi(), 32, G4);
        // combine the two halves into the 64 bits of G4
        __ or3(G4, G5, G4);
        null_check_offset = __ offset();
        if (idx == noreg) {
          __ stx(G4, base, disp);
        } else {
          __ stx(G4, base, idx);
        }
      } else {
        // V8 std needs the value in the even/odd pair G4/G5.
        __ mov (src->as_register_hi(), G4);
        __ mov (src->as_register_lo(), G5);
        null_check_offset = __ offset();
        if (idx == noreg) {
          __ std(G4, base, disp);
        } else {
          __ std(G4, base, idx);
        }
      }
    } else if (src->is_address() && dest->is_register()) {
      // Load: the memory access itself is the first (faulting) insn.
      null_check_offset = __ offset();
      if (VM_Version::v9_instructions_work()) {
        if (idx == noreg) {
          __ ldx(base, disp, G5);
        } else {
          __ ldx(base, idx, G5);
        }
        __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
        __ mov (G5, dest->as_register_lo());     // copy low half into lo
      } else {
        if (idx == noreg) {
          __ ldd(base, disp, G4);
        } else {
          __ ldd(base, idx, G4);
        }
        // G4 is high half, G5 is low half
        __ mov (G4, dest->as_register_hi());
        __ mov (G5, dest->as_register_lo());
      }
    } else {
      Unimplemented();
    }
    if (info != NULL) {
      add_debug_info_for_null_check(null_check_offset, info);
    }

  } else {
    // use normal move for all other volatiles since they don't need
    // special handling to remain atomic.
    move_op(src, dest, type, lir_patch_none, info, false, false, false);
  }
}
3271 | |
// Full fence request. Under SPARC TSO the only reordering the
// hardware performs is store-load, so a StoreLoad membar suffices.
void LIR_Assembler::membar() {
  // only StoreLoad membars are ever explicitly needed on sparcs in TSO mode
  __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
}
3276 | |
// Acquire fence: already guaranteed by SPARC TSO ordering, so no
// instruction is emitted.
void LIR_Assembler::membar_acquire() {
  // no-op on TSO
}
3280 | |
// Release fence: already guaranteed by SPARC TSO ordering, so no
// instruction is emitted.
void LIR_Assembler::membar_release() {
  // no-op on TSO
}
3284 | |
4966
701a83c86f28
7120481: storeStore barrier in constructor with final field
jiangli
parents:
4808
diff
changeset
|
// LoadLoad fence: redundant under SPARC TSO (loads are already
// ordered with respect to later loads), so no code is emitted. The
// explicit membar is kept below, commented out, for reference.
void LIR_Assembler::membar_loadload() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadload));
}
701a83c86f28
7120481: storeStore barrier in constructor with final field
jiangli
parents:
4808
diff
changeset
|
3289 |
701a83c86f28
7120481: storeStore barrier in constructor with final field
jiangli
parents:
4808
diff
changeset
|
// StoreStore fence: redundant under SPARC TSO (stores are already
// ordered with respect to later stores), so no code is emitted. The
// explicit membar is kept below, commented out, for reference.
void LIR_Assembler::membar_storestore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::storestore));
}
701a83c86f28
7120481: storeStore barrier in constructor with final field
jiangli
parents:
4808
diff
changeset
|
3294 |
701a83c86f28
7120481: storeStore barrier in constructor with final field
jiangli
parents:
4808
diff
changeset
|
// LoadStore fence: redundant under SPARC TSO (loads are already
// ordered with respect to later stores), so no code is emitted. The
// explicit membar is kept below, commented out, for reference.
void LIR_Assembler::membar_loadstore() {
  // no-op
  //__ membar(Assembler::Membar_mask_bits(Assembler::loadstore));
}
701a83c86f28
7120481: storeStore barrier in constructor with final field
jiangli
parents:
4808
diff
changeset
|
3299 |
701a83c86f28
7120481: storeStore barrier in constructor with final field
jiangli
parents:
4808
diff
changeset
|
// StoreLoad fence: the one ordering TSO does NOT guarantee, so an
// explicit membar instruction is required here.
void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
701a83c86f28
7120481: storeStore barrier in constructor with final field
jiangli
parents:
4808
diff
changeset
|
3303 |
701a83c86f28
7120481: storeStore barrier in constructor with final field
jiangli
parents:
4808
diff
changeset
|
3304 |
1783 | 3305 // Pack two sequential registers containing 32 bit values |
0 | 3306 // into a single 64 bit register. |
1783 | 3307 // src and src->successor() are packed into dst |
3308 // src and dst may be the same register. | |
3309 // Note: src is destroyed | |
3310 void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) { | |
3311 Register rs = src->as_register(); | |
3312 Register rd = dst->as_register_lo(); | |
0 | 3313 __ sllx(rs, 32, rs); |
3314 __ srl(rs->successor(), 0, rs->successor()); | |
3315 __ or3(rs, rs->successor(), rd); | |
3316 } | |
3317 | |
1783 | 3318 // Unpack a 64 bit value in a register into |
0 | 3319 // two sequential registers. |
1783 | 3320 // src is unpacked into dst and dst->successor() |
3321 void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) { | |
3322 Register rs = src->as_register_lo(); | |
3323 Register rd = dst->as_register_hi(); | |
3324 assert_different_registers(rs, rd, rd->successor()); | |
3325 __ srlx(rs, 32, rd); | |
3326 __ srl (rs, 0, rd->successor()); | |
0 | 3327 } |
3328 | |
3329 | |
3330 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { | |
3331 LIR_Address* addr = addr_opr->as_address_ptr(); | |
3332 assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet"); | |
1783 | 3333 |
3334 __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register()); | |
0 | 3335 } |
3336 | |
3337 | |
// Copy the current thread pointer (kept in G2_thread on SPARC) into
// the requested result register.
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  assert(result_reg->is_register(), "check");
  __ mov(G2_thread, result_reg->as_register());
}
3342 | |
3343 | |
// Peephole pass over the LIR instruction list: fill SPARC delay
// slots for branches and calls. Where legal, the instruction
// preceding a branch/call is swapped into its delay slot (wrapped in
// a LIR_OpDelay); otherwise an explicit nop delay op is inserted.
// Mutates 'inst' in place via at_put/insert_before.
void LIR_Assembler::peephole(LIR_List* lir) {
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 0; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);
    switch (op->code()) {
      case lir_cond_float_branch:
      case lir_branch: {
        LIR_OpBranch* branch = op->as_OpBranch();
        assert(branch->info() == NULL, "shouldn't be state on branches anymore");
        LIR_Op* delay_op = NULL;
        // we'd like to be able to pull following instructions into
        // this slot but we don't know enough to do it safely yet so
        // only optimize block to block control flow.
        if (LIRFillDelaySlots && branch->block()) {
          LIR_Op* prev = inst->at(i - 1);
          if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
            // swap previous instruction into delay slot
            inst->at_put(i - 1, op);
            inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
            if (LIRTracePeephole) {
              tty->print_cr("delayed");
              inst->at(i - 1)->print();
              inst->at(i)->print();
              tty->cr();
            }
#endif
            // slot filled; skip the explicit-nop path below
            continue;
          }
        }

        if (!delay_op) {
          // nothing could be moved in: pad with an explicit nop
          delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
        }
        inst->insert_before(i + 1, delay_op);
        break;
      }
      case lir_static_call:
      case lir_virtual_call:
      case lir_icvirtual_call:
      case lir_optvirtual_call:
      case lir_dynamic_call: {
        LIR_Op* prev = inst->at(i - 1);
        if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
            (op->code() != lir_virtual_call ||
             !prev->result_opr()->is_single_cpu() ||
             prev->result_opr()->as_register() != O0) &&
            LIR_Assembler::is_single_instruction(prev)) {
          // Only moves without info can be put into the delay slot.
          // Also don't allow the setup of the receiver in the delay
          // slot for vtable calls.
          inst->at_put(i - 1, op);
          inst->at_put(i, new LIR_OpDelay(prev, op->info()));
#ifndef PRODUCT
          if (LIRTracePeephole) {
            tty->print_cr("delayed");
            inst->at(i - 1)->print();
            inst->at(i)->print();
            tty->cr();
          }
#endif
        } else {
          // pad with an explicit nop delay op, carrying the call's
          // debug info; bump i past the inserted op
          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
          inst->insert_before(i + 1, delay_op);
          i++;
        }

#if defined(TIERED) && !defined(_LP64)
        // fixup the return value from G1 to O0/O1 for long returns.
        // It's done here instead of in LIRGenerator because there's
        // such a mismatch between the single reg and double reg
        // calling convention.
        LIR_OpJavaCall* callop = op->as_OpJavaCall();
        if (callop->result_opr() == FrameMap::out_long_opr) {
          LIR_OpJavaCall* call;
          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
          for (int a = 0; a < arguments->length(); a++) {
            // NOTE(review): this indexes the LIR_OprList pointers as
            // raw arrays rather than copying elements (e.g. via
            // at()/append()), and the loop bounds use the new list's
            // length; looks suspicious -- confirm against the
            // LIR_OprList API. Only compiled when TIERED && !_LP64.
            arguments[a] = callop->arguments()[a];
          }
          if (op->code() == lir_virtual_call) {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->vtable_offset(), arguments, callop->info());
          } else {
            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
                                      callop->addr(), arguments, callop->info());
          }
          inst->at_put(i - 1, call);
          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
                                                 T_LONG, lir_patch_none, NULL));
        }
#endif
        break;
      }
    }
  }
}
3440 | |
3441 | |
3442 | |
3443 | |
3444 #undef __ |