Mercurial > hg > truffle
annotate src/cpu/x86/vm/c1_Runtime1_x86.cpp @ 17716:cdb71841f4bc
6498581: ThreadInterruptTest3 produces wrong output on Windows
Summary: There is race condition between os::interrupt and os::is_interrupted on Windows. In JVM_Sleep(Thread.sleep), check if thread gets interrupted, it may see interrupted but not really interrupted so cause spurious waking up (early return from sleep). Fix by checking if interrupt event really gets set thus prevent false return. For intrinsic of _isInterrupted, on Windows, go fastpath only on bit not set.
Reviewed-by: acorn, kvn
Contributed-by: david.holmes@oracle.com, yumin.qi@oracle.com
author | minqi |
---|---|
date | Wed, 26 Feb 2014 15:20:41 -0800 |
parents | 55fb97c4c58d |
children | d8041d695d19 b5eb829bbce1 |
rev | line source |
---|---|
0 | 1 /* |
17467
55fb97c4c58d
8029233: Update copyright year to match last edit in jdk8 hotspot repository for 2013
mikael
parents:
13424
diff
changeset
|
2 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1368
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1368
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1368
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
2415
09f96c3ff1ad
7032388: guarantee(VM_Version::supports_cmov()) failed: illegal instruction on i586 after 6919934
twisti
parents:
2321
diff
changeset
|
26 #include "asm/assembler.hpp" |
1972 | 27 #include "c1/c1_Defs.hpp" |
28 #include "c1/c1_MacroAssembler.hpp" | |
29 #include "c1/c1_Runtime1.hpp" | |
30 #include "interpreter/interpreter.hpp" | |
31 #include "nativeInst_x86.hpp" | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
32 #include "oops/compiledICHolder.hpp" |
1972 | 33 #include "oops/oop.inline.hpp" |
34 #include "prims/jvmtiExport.hpp" | |
35 #include "register_x86.hpp" | |
36 #include "runtime/sharedRuntime.hpp" | |
37 #include "runtime/signature.hpp" | |
38 #include "runtime/vframeArray.hpp" | |
8001
db9981fd3124
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
jprovino
parents:
6725
diff
changeset
|
39 #include "utilities/macros.hpp" |
1972 | 40 #include "vmreg_x86.inline.hpp" |
12835
69944b868a32
8014555: G1: Memory ordering problem with Conc refinement and card marking
mgerdin
parents:
12160
diff
changeset
|
41 #if INCLUDE_ALL_GCS |
69944b868a32
8014555: G1: Memory ordering problem with Conc refinement and card marking
mgerdin
parents:
12160
diff
changeset
|
42 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" |
69944b868a32
8014555: G1: Memory ordering problem with Conc refinement and card marking
mgerdin
parents:
12160
diff
changeset
|
43 #endif |
0 | 44 |
45 | |
46 // Implementation of StubAssembler | |
47 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
48 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) { |
0 | 49 // setup registers |
304 | 50 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions) |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
51 assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different"); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
52 assert(oop_result1 != thread && metadata_result != thread, "registers must be different"); |
0 | 53 assert(args_size >= 0, "illegal args_size"); |
5904
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
54 bool align_stack = false; |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
55 #ifdef _LP64 |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
56 // At a method handle call, the stack may not be properly aligned |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
57 // when returning with an exception. |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
58 align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id); |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
59 #endif |
0 | 60 |
304 | 61 #ifdef _LP64 |
62 mov(c_rarg0, thread); | |
63 set_num_rt_args(0); // Nothing on stack | |
64 #else | |
0 | 65 set_num_rt_args(1 + args_size); |
66 | |
67 // push java thread (becomes first argument of C function) | |
68 get_thread(thread); | |
304 | 69 push(thread); |
70 #endif // _LP64 | |
0 | 71 |
5904
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
72 int call_offset; |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
73 if (!align_stack) { |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
74 set_last_Java_frame(thread, noreg, rbp, NULL); |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
75 } else { |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
76 address the_pc = pc(); |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
77 call_offset = offset(); |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
78 set_last_Java_frame(thread, noreg, rbp, the_pc); |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
79 andptr(rsp, -(StackAlignmentInBytes)); // Align stack |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
80 } |
304 | 81 |
0 | 82 // do the call |
83 call(RuntimeAddress(entry)); | |
5904
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
84 if (!align_stack) { |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
85 call_offset = offset(); |
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
86 } |
0 | 87 // verify callee-saved register |
88 #ifdef ASSERT | |
89 guarantee(thread != rax, "change this code"); | |
304 | 90 push(rax); |
0 | 91 { Label L; |
92 get_thread(rax); | |
304 | 93 cmpptr(thread, rax); |
0 | 94 jcc(Assembler::equal, L); |
95 int3(); | |
96 stop("StubAssembler::call_RT: rdi not callee saved?"); | |
97 bind(L); | |
98 } | |
304 | 99 pop(rax); |
0 | 100 #endif |
5904
bf7796b7367a
7148486: At a method handle call returning with an exception may call the runtime with misaligned stack (x64)
roland
parents:
4771
diff
changeset
|
101 reset_last_Java_frame(thread, true, align_stack); |
0 | 102 |
103 // discard thread and arguments | |
304 | 104 NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); |
0 | 105 |
106 // check for pending exceptions | |
107 { Label L; | |
304 | 108 cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); |
0 | 109 jcc(Assembler::equal, L); |
110 // exception pending => remove activation and forward to exception handler | |
304 | 111 movptr(rax, Address(thread, Thread::pending_exception_offset())); |
0 | 112 // make sure that the vm_results are cleared |
113 if (oop_result1->is_valid()) { | |
512
db4caa99ef11
6787106: Hotspot 32 bit build fails on platforms having different definitions for intptr_t & int32_t
xlu
parents:
380
diff
changeset
|
114 movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); |
0 | 115 } |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
116 if (metadata_result->is_valid()) { |
512
db4caa99ef11
6787106: Hotspot 32 bit build fails on platforms having different definitions for intptr_t & int32_t
xlu
parents:
380
diff
changeset
|
117 movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); |
0 | 118 } |
119 if (frame_size() == no_frame_size) { | |
120 leave(); | |
121 jump(RuntimeAddress(StubRoutines::forward_exception_entry())); | |
122 } else if (_stub_id == Runtime1::forward_exception_id) { | |
123 should_not_reach_here(); | |
124 } else { | |
125 jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); | |
126 } | |
127 bind(L); | |
128 } | |
129 // get oop results if there are any and reset the values in the thread | |
130 if (oop_result1->is_valid()) { | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
131 get_vm_result(oop_result1, thread); |
0 | 132 } |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
133 if (metadata_result->is_valid()) { |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
134 get_vm_result_2(metadata_result, thread); |
0 | 135 } |
136 return call_offset; | |
137 } | |
138 | |
139 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
140 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) { |
304 | 141 #ifdef _LP64 |
142 mov(c_rarg1, arg1); | |
143 #else | |
144 push(arg1); | |
145 #endif // _LP64 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
146 return call_RT(oop_result1, metadata_result, entry, 1); |
0 | 147 } |
148 | |
149 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
150 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) { |
304 | 151 #ifdef _LP64 |
152 if (c_rarg1 == arg2) { | |
153 if (c_rarg2 == arg1) { | |
154 xchgq(arg1, arg2); | |
155 } else { | |
156 mov(c_rarg2, arg2); | |
157 mov(c_rarg1, arg1); | |
158 } | |
159 } else { | |
160 mov(c_rarg1, arg1); | |
161 mov(c_rarg2, arg2); | |
162 } | |
163 #else | |
164 push(arg2); | |
165 push(arg1); | |
166 #endif // _LP64 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
167 return call_RT(oop_result1, metadata_result, entry, 2); |
0 | 168 } |
169 | |
170 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
171 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) { |
304 | 172 #ifdef _LP64 |
173 // if there is any conflict use the stack | |
174 if (arg1 == c_rarg2 || arg1 == c_rarg3 || | |
175 arg2 == c_rarg1 || arg1 == c_rarg3 || | |
176 arg3 == c_rarg1 || arg1 == c_rarg2) { | |
177 push(arg3); | |
178 push(arg2); | |
179 push(arg1); | |
180 pop(c_rarg1); | |
181 pop(c_rarg2); | |
182 pop(c_rarg3); | |
183 } else { | |
184 mov(c_rarg1, arg1); | |
185 mov(c_rarg2, arg2); | |
186 mov(c_rarg3, arg3); | |
187 } | |
188 #else | |
189 push(arg3); | |
190 push(arg2); | |
191 push(arg1); | |
192 #endif // _LP64 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
193 return call_RT(oop_result1, metadata_result, entry, 3); |
0 | 194 } |
195 | |
196 | |
197 // Implementation of StubFrame | |
198 | |
199 class StubFrame: public StackObj { | |
200 private: | |
201 StubAssembler* _sasm; | |
202 | |
203 public: | |
204 StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments); | |
205 void load_argument(int offset_in_words, Register reg); | |
206 | |
207 ~StubFrame(); | |
208 }; | |
209 | |
210 | |
211 #define __ _sasm-> | |
212 | |
213 StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) { | |
214 _sasm = sasm; | |
215 __ set_info(name, must_gc_arguments); | |
216 __ enter(); | |
217 } | |
218 | |
219 // load parameters that were stored with LIR_Assembler::store_parameter | |
220 // Note: offsets for store_parameter and load_argument must match | |
221 void StubFrame::load_argument(int offset_in_words, Register reg) { | |
222 // rbp, + 0: link | |
223 // + 1: return address | |
224 // + 2: argument with offset 0 | |
225 // + 3: argument with offset 1 | |
226 // + 4: ... | |
227 | |
304 | 228 __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord)); |
0 | 229 } |
230 | |
231 | |
232 StubFrame::~StubFrame() { | |
233 __ leave(); | |
234 __ ret(0); | |
235 } | |
236 | |
237 #undef __ | |
238 | |
239 | |
240 // Implementation of Runtime1 | |
241 | |
242 #define __ sasm-> | |
243 | |
304 | 244 const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2; |
245 const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2; | |
0 | 246 |
247 // Stack layout for saving/restoring all the registers needed during a runtime | |
248 // call (this includes deoptimization) | |
249 // Note: note that users of this frame may well have arguments to some runtime | |
250 // while these values are on the stack. These positions neglect those arguments | |
251 // but the code in save_live_registers will take the argument count into | |
252 // account. | |
253 // | |
304 | 254 #ifdef _LP64 |
255 #define SLOT2(x) x, | |
256 #define SLOT_PER_WORD 2 | |
257 #else | |
258 #define SLOT2(x) | |
259 #define SLOT_PER_WORD 1 | |
260 #endif // _LP64 | |
261 | |
0 | 262 enum reg_save_layout { |
304 | 263 // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that |
264 // happen and will assert if the stack size we create is misaligned | |
265 #ifdef _LP64 | |
266 align_dummy_0, align_dummy_1, | |
267 #endif // _LP64 | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
268 #ifdef _WIN64 |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
269 // Windows always allocates space for it's argument registers (see |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
270 // frame::arg_reg_save_area_bytes). |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
271 arg_reg_save_1, arg_reg_save_1H, // 0, 4 |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
272 arg_reg_save_2, arg_reg_save_2H, // 8, 12 |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
273 arg_reg_save_3, arg_reg_save_3H, // 16, 20 |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
274 arg_reg_save_4, arg_reg_save_4H, // 24, 28 |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
275 #endif // _WIN64 |
304 | 276 xmm_regs_as_doubles_off, // 32 |
277 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 | |
278 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 | |
279 // fpu_state_end_off is exclusive | |
280 fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352 | |
281 marker = fpu_state_end_off, SLOT2(markerH) // 352, 356 | |
282 extra_space_offset, // 360 | |
283 #ifdef _LP64 | |
284 r15_off = extra_space_offset, r15H_off, // 360, 364 | |
285 r14_off, r14H_off, // 368, 372 | |
286 r13_off, r13H_off, // 376, 380 | |
287 r12_off, r12H_off, // 384, 388 | |
288 r11_off, r11H_off, // 392, 396 | |
289 r10_off, r10H_off, // 400, 404 | |
290 r9_off, r9H_off, // 408, 412 | |
291 r8_off, r8H_off, // 416, 420 | |
292 rdi_off, rdiH_off, // 424, 428 | |
293 #else | |
0 | 294 rdi_off = extra_space_offset, |
304 | 295 #endif // _LP64 |
296 rsi_off, SLOT2(rsiH_off) // 432, 436 | |
297 rbp_off, SLOT2(rbpH_off) // 440, 444 | |
298 rsp_off, SLOT2(rspH_off) // 448, 452 | |
299 rbx_off, SLOT2(rbxH_off) // 456, 460 | |
300 rdx_off, SLOT2(rdxH_off) // 464, 468 | |
301 rcx_off, SLOT2(rcxH_off) // 472, 476 | |
302 rax_off, SLOT2(raxH_off) // 480, 484 | |
303 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 | |
304 return_off, SLOT2(returnH_off) // 496, 500 | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
305 reg_save_frame_size // As noted: neglects any parameters to runtime // 504 |
0 | 306 }; |
307 | |
308 | |
309 | |
310 // Save off registers which might be killed by calls into the runtime. | |
311 // Tries to smart of about FP registers. In particular we separate | |
312 // saving and describing the FPU registers for deoptimization since we | |
313 // have to save the FPU registers twice if we describe them and on P4 | |
314 // saving FPU registers which don't contain anything appears | |
315 // expensive. The deopt blob is the only thing which needs to | |
316 // describe FPU registers. In all other cases it should be sufficient | |
317 // to simply save their current value. | |
318 | |
319 static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, | |
320 bool save_fpu_registers = true) { | |
304 | 321 |
322 // In 64bit all the args are in regs so there are no additional stack slots | |
323 LP64_ONLY(num_rt_args = 0); | |
324 LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");) | |
325 int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread | |
326 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word ); | |
0 | 327 |
328 // record saved value locations in an OopMap | |
329 // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread | |
304 | 330 OopMap* map = new OopMap(frame_size_in_slots, 0); |
0 | 331 map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg()); |
332 map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg()); | |
333 map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg()); | |
334 map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); | |
335 map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg()); | |
336 map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); | |
304 | 337 #ifdef _LP64 |
338 map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); | |
339 map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); | |
340 map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); | |
341 map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg()); | |
342 map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg()); | |
343 map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg()); | |
344 map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg()); | |
345 map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg()); | |
346 | |
347 // This is stupid but needed. | |
348 map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next()); | |
349 map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next()); | |
350 map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next()); | |
351 map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next()); | |
352 map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next()); | |
353 map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next()); | |
354 | |
355 map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next()); | |
356 map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next()); | |
357 map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next()); | |
358 map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next()); | |
359 map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next()); | |
360 map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); | |
361 map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); | |
362 map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); | |
363 #endif // _LP64 | |
0 | 364 |
365 if (save_fpu_registers) { | |
366 if (UseSSE < 2) { | |
367 int fpu_off = float_regs_as_doubles_off; | |
368 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { | |
369 VMReg fpu_name_0 = FrameMap::fpu_regname(n); | |
370 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0); | |
371 // %%% This is really a waste but we'll keep things as they were for now | |
372 if (true) { | |
373 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next()); | |
374 } | |
375 fpu_off += 2; | |
376 } | |
377 assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots"); | |
378 } | |
379 | |
380 if (UseSSE >= 2) { | |
381 int xmm_off = xmm_regs_as_doubles_off; | |
382 for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { | |
383 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); | |
384 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); | |
385 // %%% This is really a waste but we'll keep things as they were for now | |
386 if (true) { | |
387 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next()); | |
388 } | |
389 xmm_off += 2; | |
390 } | |
391 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers"); | |
392 | |
393 } else if (UseSSE == 1) { | |
394 int xmm_off = xmm_regs_as_doubles_off; | |
395 for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { | |
396 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); | |
397 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); | |
398 xmm_off += 2; | |
399 } | |
400 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers"); | |
401 } | |
402 } | |
403 | |
404 return map; | |
405 } | |
406 | |
407 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args, | |
408 bool save_fpu_registers = true) { | |
409 __ block_comment("save_live_registers"); | |
410 | |
304 | 411 __ pusha(); // integer registers |
0 | 412 |
413 // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); | |
414 // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); | |
415 | |
304 | 416 __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); |
0 | 417 |
418 #ifdef ASSERT | |
304 | 419 __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); |
0 | 420 #endif |
421 | |
422 if (save_fpu_registers) { | |
423 if (UseSSE < 2) { | |
424 // save FPU stack | |
304 | 425 __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); |
0 | 426 __ fwait(); |
427 | |
428 #ifdef ASSERT | |
429 Label ok; | |
304 | 430 __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); |
0 | 431 __ jccb(Assembler::equal, ok); |
432 __ stop("corrupted control word detected"); | |
433 __ bind(ok); | |
434 #endif | |
435 | |
436 // Reset the control word to guard against exceptions being unmasked | |
437 // since fstp_d can cause FPU stack underflow exceptions. Write it | |
438 // into the on stack copy and then reload that to make sure that the | |
439 // current and future values are correct. | |
304 | 440 __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); |
441 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); | |
0 | 442 |
443 // Save the FPU registers in de-opt-able form | |
304 | 444 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); |
445 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); | |
446 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); | |
447 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); | |
448 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); | |
449 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); | |
450 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); | |
451 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); | |
0 | 452 } |
453 | |
454 if (UseSSE >= 2) { | |
455 // save XMM registers | |
456 // XMM registers can contain float or double values, but this is not known here, | |
457 // so always save them as doubles. | |
458 // note that float values are _not_ converted automatically, so for float values | |
459 // the second word contains only garbage data. | |
304 | 460 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0); |
461 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1); | |
462 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2); | |
463 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3); | |
464 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4); | |
465 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5); | |
466 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6); | |
467 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7); | |
468 #ifdef _LP64 | |
469 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8); | |
470 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9); | |
471 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10); | |
472 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11); | |
473 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12); | |
474 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13); | |
475 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14); | |
476 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15); | |
477 #endif // _LP64 | |
0 | 478 } else if (UseSSE == 1) { |
479 // save XMM registers as float because double not supported without SSE2 | |
304 | 480 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0); |
481 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1); | |
482 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2); | |
483 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3); | |
484 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4); | |
485 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5); | |
486 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6); | |
487 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7); | |
0 | 488 } |
489 } | |
490 | |
491 // FPU stack must be empty now | |
492 __ verify_FPU(0, "save_live_registers"); | |
493 | |
494 return generate_oop_map(sasm, num_rt_args, save_fpu_registers); | |
495 } | |
496 | |
497 | |
// Restore the FPU/XMM state that save_live_registers spilled into the frame,
// then release the extra stack space that was reserved for it.
// Mirrors the save path: with SSE2+ the XMM registers were saved as doubles,
// with SSE==1 only as floats, and without SSE the x87 state was saved via fsave.
static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers (saved as 8-byte doubles at fixed slot offsets)
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      // xmm8..xmm15 exist only in 64-bit mode
      __ movdbl(xmm8,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  64));
      __ movdbl(xmm9,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      // (they were saved as floats because double moves need SSE2)
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      // reload the full x87 FPU environment saved by the save path
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    // The save path stored a 0xfeedbeef marker at a known slot; if it is not
    // there, the save/restore frame-layout constants are out of sync.
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  // pop the FPU/XMM spill area (the integer registers are still on the stack
  // below it and are restored by the callers of this helper)
  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}
556 | |
557 | |
// Restore all live registers saved by save_live_registers: first the FPU/XMM
// state (optionally), then all general-purpose registers via popa.
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();  // pops the GPRs pushed by the matching pusha in the save path
}
564 | |
565 | |
// Like restore_live_registers, but leaves RAX untouched so a stub can return
// a result (e.g. a newly allocated object) in RAX while restoring everything
// else. The GPRs are reloaded individually from the pusha layout instead of
// using popa, skipping the saved rax/rsp slots.
static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);

#ifdef _LP64
  // Slots follow the 64-bit pusha order (r15 at the top of the save area).
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp (slot 11) — restored implicitly by the addptr below
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));
  // slot 15 is the saved rax — deliberately NOT reloaded

  __ addptr(rsp, 16 * wordSize);  // pop the whole GPR save area
#else

  // 32-bit: pop in reverse pusha order, skipping rsp and the saved rax.
  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value (saved rsp)
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);  // discard saved rax, keep current RAX
#endif // _LP64
}
601 | |
602 | |
// Platform-dependent one-time initialization hook for Runtime1.
// x86 has no platform-specific state to set up.
void Runtime1::initialize_pd() {
  // nothing to do
}
606 | |
607 | |
608 // target: the entry point of the method that creates and posts the exception oop | |
609 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved) | |
610 | |
// Generate a stub that saves all registers, loads the optional stack-passed
// argument, and calls the runtime `target` which creates and posts the
// exception. The runtime call never returns normally (exception dispatch
// takes over), hence the trailing __ stop().
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;  // thread (+ exception argument)
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  // (callers pass it on the stack because registers must be preserved)
  if (has_argument) {
#ifdef _LP64
    // 64-bit: pass it directly in the second C argument register
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    // 32-bit: re-push it for the cdecl runtime call
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // the runtime call throws; control must never come back here
  __ stop("should not reach here");

  return oop_maps;
}
641 | |
642 | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
// Generate the exception-handling stub for one of the handle-exception StubIDs.
// On entry: RAX = exception oop (except forward_exception_id, which reads the
// pending exception from the thread), RDX = issuing pc. The stub stores the
// exception state into the JavaThread, asks the runtime for the handler
// address, patches it into the return address, and returns/jumps to it.
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the callers
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    // (nofpu variant skips the FPU/XMM spill for callers that don't use it)
    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    // Windows x64 ABI: callee may spill argument registers into caller space
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax, and rdx is valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax, contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if nmethod was deoptimized while we looked up
  //      handler regardless of whether handler existed in the nmethod.

  // only rax, is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();
    __ pop(rcx);  // rcx now holds the patched handler address

    // Restore SP from BP if the exception PC is a method handle call site.
    NOT_LP64(__ get_thread(thread);)
    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}
772 | |
773 | |
// Generate the unwind-exception stub: called when the current activation has
// no handler for the exception in RAX. It looks up the caller's exception
// handler (via the return address) and jumps there with the register
// convention rax = exception oop, rdx = throwing pc, rbx = handler address.
void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax, is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // Restore SP from BP if the exception PC is a method handle call site.
  NOT_LP64(__ get_thread(thread);)
  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}
851 | |
852 | |
// Generate a patching stub: save all registers, call the runtime `target`
// (which patches the calling nmethod), then either return normally, forward a
// pending exception, or — if the nmethod was deoptimized while patching —
// jump to the deopt blob for reexecution.
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack.  A patch may
    // have values live in registers so the entry point with the
    // exception in tls.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute. Proper return address is already on the stack we just restore
  // registers, pop all of our frame but the return address and jump to the deopt blob
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}
979 | |
980 | |
981 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { | |
982 | |
983 // for better readability | |
984 const bool must_gc_arguments = true; | |
985 const bool dont_gc_arguments = false; | |
986 | |
987 // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu | |
988 bool save_fpu_registers = true; | |
989 | |
990 // stub code & info for the different stubs | |
991 OopMapSet* oop_maps = NULL; | |
992 switch (id) { | |
993 case forward_exception_id: | |
994 { | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
995 oop_maps = generate_handle_exception(id, sasm); |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
996 __ leave(); |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
997 __ ret(0); |
0 | 998 } |
999 break; | |
1000 | |
1001 case new_instance_id: | |
1002 case fast_new_instance_id: | |
1003 case fast_new_instance_init_check_id: | |
1004 { | |
1005 Register klass = rdx; // Incoming | |
1006 Register obj = rax; // Result | |
1007 | |
1008 if (id == new_instance_id) { | |
1009 __ set_info("new_instance", dont_gc_arguments); | |
1010 } else if (id == fast_new_instance_id) { | |
1011 __ set_info("fast new_instance", dont_gc_arguments); | |
1012 } else { | |
1013 assert(id == fast_new_instance_init_check_id, "bad StubID"); | |
1014 __ set_info("fast new_instance init check", dont_gc_arguments); | |
1015 } | |
1016 | |
1017 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && | |
1018 UseTLAB && FastTLABRefill) { | |
1019 Label slow_path; | |
1020 Register obj_size = rcx; | |
1021 Register t1 = rbx; | |
1022 Register t2 = rsi; | |
1023 assert_different_registers(klass, obj, obj_size, t1, t2); | |
1024 | |
304 | 1025 __ push(rdi); |
1026 __ push(rbx); | |
0 | 1027 |
1028 if (id == fast_new_instance_init_check_id) { | |
1029 // make sure the klass is initialized | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1030 __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); |
0 | 1031 __ jcc(Assembler::notEqual, slow_path); |
1032 } | |
1033 | |
1034 #ifdef ASSERT | |
1035 // assert object can be fast path allocated | |
1036 { | |
1037 Label ok, not_ok; | |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4048
diff
changeset
|
1038 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); |
0 | 1039 __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0) |
1040 __ jcc(Assembler::lessEqual, not_ok); | |
1041 __ testl(obj_size, Klass::_lh_instance_slow_path_bit); | |
1042 __ jcc(Assembler::zero, ok); | |
1043 __ bind(not_ok); | |
1044 __ stop("assert(can be fast path allocated)"); | |
1045 __ should_not_reach_here(); | |
1046 __ bind(ok); | |
1047 } | |
1048 #endif // ASSERT | |
1049 | |
1050 // if we got here then the TLAB allocation failed, so try | |
1051 // refilling the TLAB or allocating directly from eden. | |
1052 Label retry_tlab, try_eden; | |
2100
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1053 const Register thread = |
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1054 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi |
0 | 1055 |
1056 __ bind(retry_tlab); | |
1057 | |
304 | 1058 // get the instance size (size is postive so movl is fine for 64bit) |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4048
diff
changeset
|
1059 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); |
2100
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1060 |
0 | 1061 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); |
2100
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1062 |
0 | 1063 __ initialize_object(obj, klass, obj_size, 0, t1, t2); |
1064 __ verify_oop(obj); | |
304 | 1065 __ pop(rbx); |
1066 __ pop(rdi); | |
0 | 1067 __ ret(0); |
1068 | |
1069 __ bind(try_eden); | |
304 | 1070 // get the instance size (size is postive so movl is fine for 64bit) |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4048
diff
changeset
|
1071 __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); |
2100
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1072 |
0 | 1073 __ eden_allocate(obj, obj_size, 0, t1, slow_path); |
2100
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1074 __ incr_allocated_bytes(thread, obj_size, 0); |
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1075 |
0 | 1076 __ initialize_object(obj, klass, obj_size, 0, t1, t2); |
1077 __ verify_oop(obj); | |
304 | 1078 __ pop(rbx); |
1079 __ pop(rdi); | |
0 | 1080 __ ret(0); |
1081 | |
1082 __ bind(slow_path); | |
304 | 1083 __ pop(rbx); |
1084 __ pop(rdi); | |
0 | 1085 } |
1086 | |
1087 __ enter(); | |
1088 OopMap* map = save_live_registers(sasm, 2); | |
1089 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass); | |
1090 oop_maps = new OopMapSet(); | |
1091 oop_maps->add_gc_map(call_offset, map); | |
1092 restore_live_registers_except_rax(sasm); | |
1093 __ verify_oop(obj); | |
1094 __ leave(); | |
1095 __ ret(0); | |
1096 | |
1097 // rax,: new instance | |
1098 } | |
1099 | |
1100 break; | |
1101 | |
1102 case counter_overflow_id: | |
1103 { | |
1783 | 1104 Register bci = rax, method = rbx; |
0 | 1105 __ enter(); |
1783 | 1106 OopMap* map = save_live_registers(sasm, 3); |
0 | 1107 // Retrieve bci |
1108 __ movl(bci, Address(rbp, 2*BytesPerWord)); | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1109 // And a pointer to the Method* |
1783 | 1110 __ movptr(method, Address(rbp, 3*BytesPerWord)); |
1111 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method); | |
0 | 1112 oop_maps = new OopMapSet(); |
1113 oop_maps->add_gc_map(call_offset, map); | |
1114 restore_live_registers(sasm); | |
1115 __ leave(); | |
1116 __ ret(0); | |
1117 } | |
1118 break; | |
1119 | |
1120 case new_type_array_id: | |
1121 case new_object_array_id: | |
1122 { | |
1123 Register length = rbx; // Incoming | |
1124 Register klass = rdx; // Incoming | |
1125 Register obj = rax; // Result | |
1126 | |
1127 if (id == new_type_array_id) { | |
1128 __ set_info("new_type_array", dont_gc_arguments); | |
1129 } else { | |
1130 __ set_info("new_object_array", dont_gc_arguments); | |
1131 } | |
1132 | |
1133 #ifdef ASSERT | |
1134 // assert object type is really an array of the proper kind | |
1135 { | |
1136 Label ok; | |
1137 Register t0 = obj; | |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4048
diff
changeset
|
1138 __ movl(t0, Address(klass, Klass::layout_helper_offset())); |
0 | 1139 __ sarl(t0, Klass::_lh_array_tag_shift); |
1140 int tag = ((id == new_type_array_id) | |
1141 ? Klass::_lh_array_tag_type_value | |
1142 : Klass::_lh_array_tag_obj_value); | |
1143 __ cmpl(t0, tag); | |
1144 __ jcc(Assembler::equal, ok); | |
1145 __ stop("assert(is an array klass)"); | |
1146 __ should_not_reach_here(); | |
1147 __ bind(ok); | |
1148 } | |
1149 #endif // ASSERT | |
1150 | |
1151 if (UseTLAB && FastTLABRefill) { | |
1152 Register arr_size = rsi; | |
1153 Register t1 = rcx; // must be rcx for use as shift count | |
1154 Register t2 = rdi; | |
1155 Label slow_path; | |
1156 assert_different_registers(length, klass, obj, arr_size, t1, t2); | |
1157 | |
1158 // check that array length is small enough for fast path. | |
1159 __ cmpl(length, C1_MacroAssembler::max_array_allocation_length); | |
1160 __ jcc(Assembler::above, slow_path); | |
1161 | |
1162 // if we got here then the TLAB allocation failed, so try | |
1163 // refilling the TLAB or allocating directly from eden. | |
1164 Label retry_tlab, try_eden; | |
2100
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1165 const Register thread = |
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1166 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi |
0 | 1167 |
1168 __ bind(retry_tlab); | |
1169 | |
1170 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) | |
2100
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1171 // since size is positive movl does right thing on 64bit |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4048
diff
changeset
|
1172 __ movl(t1, Address(klass, Klass::layout_helper_offset())); |
304 | 1173 // since size is postive movl does right thing on 64bit |
0 | 1174 __ movl(arr_size, length); |
1175 assert(t1 == rcx, "fixed register usage"); | |
304 | 1176 __ shlptr(arr_size /* by t1=rcx, mod 32 */); |
1177 __ shrptr(t1, Klass::_lh_header_size_shift); | |
1178 __ andptr(t1, Klass::_lh_header_size_mask); | |
1179 __ addptr(arr_size, t1); | |
1180 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up | |
1181 __ andptr(arr_size, ~MinObjAlignmentInBytesMask); | |
0 | 1182 |
1183 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size | |
1184 | |
1185 __ initialize_header(obj, klass, length, t1, t2); | |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4048
diff
changeset
|
1186 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); |
0 | 1187 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); |
1188 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); | |
304 | 1189 __ andptr(t1, Klass::_lh_header_size_mask); |
1190 __ subptr(arr_size, t1); // body length | |
1191 __ addptr(t1, obj); // body start | |
0 | 1192 __ initialize_body(t1, arr_size, 0, t2); |
1193 __ verify_oop(obj); | |
1194 __ ret(0); | |
1195 | |
1196 __ bind(try_eden); | |
1197 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) | |
2100
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1198 // since size is positive movl does right thing on 64bit |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4048
diff
changeset
|
1199 __ movl(t1, Address(klass, Klass::layout_helper_offset())); |
304 | 1200 // since size is postive movl does right thing on 64bit |
0 | 1201 __ movl(arr_size, length); |
1202 assert(t1 == rcx, "fixed register usage"); | |
304 | 1203 __ shlptr(arr_size /* by t1=rcx, mod 32 */); |
1204 __ shrptr(t1, Klass::_lh_header_size_shift); | |
1205 __ andptr(t1, Klass::_lh_header_size_mask); | |
1206 __ addptr(arr_size, t1); | |
1207 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up | |
1208 __ andptr(arr_size, ~MinObjAlignmentInBytesMask); | |
0 | 1209 |
1210 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size | |
2100
b1a2afa37ec4
7003271: Hotspot should track cumulative Java heap bytes allocated on a per-thread basis
phh
parents:
2002
diff
changeset
|
1211 __ incr_allocated_bytes(thread, arr_size, 0); |
0 | 1212 |
1213 __ initialize_header(obj, klass, length, t1, t2); | |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4048
diff
changeset
|
1214 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); |
0 | 1215 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); |
1216 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); | |
304 | 1217 __ andptr(t1, Klass::_lh_header_size_mask); |
1218 __ subptr(arr_size, t1); // body length | |
1219 __ addptr(t1, obj); // body start | |
0 | 1220 __ initialize_body(t1, arr_size, 0, t2); |
1221 __ verify_oop(obj); | |
1222 __ ret(0); | |
1223 | |
1224 __ bind(slow_path); | |
1225 } | |
1226 | |
1227 __ enter(); | |
1228 OopMap* map = save_live_registers(sasm, 3); | |
1229 int call_offset; | |
1230 if (id == new_type_array_id) { | |
1231 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); | |
1232 } else { | |
1233 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); | |
1234 } | |
1235 | |
1236 oop_maps = new OopMapSet(); | |
1237 oop_maps->add_gc_map(call_offset, map); | |
1238 restore_live_registers_except_rax(sasm); | |
1239 | |
1240 __ verify_oop(obj); | |
1241 __ leave(); | |
1242 __ ret(0); | |
1243 | |
1244 // rax,: new array | |
1245 } | |
1246 break; | |
1247 | |
1248 case new_multi_array_id: | |
1249 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments); | |
1250 // rax,: klass | |
1251 // rbx,: rank | |
1252 // rcx: address of 1st dimension | |
1253 OopMap* map = save_live_registers(sasm, 4); | |
1254 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx); | |
1255 | |
1256 oop_maps = new OopMapSet(); | |
1257 oop_maps->add_gc_map(call_offset, map); | |
1258 restore_live_registers_except_rax(sasm); | |
1259 | |
1260 // rax,: new multi array | |
1261 __ verify_oop(rax); | |
1262 } | |
1263 break; | |
1264 | |
1265 case register_finalizer_id: | |
1266 { | |
1267 __ set_info("register_finalizer", dont_gc_arguments); | |
1268 | |
304 | 1269 // This is called via call_runtime so the arguments |
1270 // will be place in C abi locations | |
1271 | |
1272 #ifdef _LP64 | |
1273 __ verify_oop(c_rarg0); | |
1274 __ mov(rax, c_rarg0); | |
1275 #else | |
0 | 1276 // The object is passed on the stack and we haven't pushed a |
1277 // frame yet so it's one work away from top of stack. | |
304 | 1278 __ movptr(rax, Address(rsp, 1 * BytesPerWord)); |
0 | 1279 __ verify_oop(rax); |
304 | 1280 #endif // _LP64 |
0 | 1281 |
1282 // load the klass and check the has finalizer flag | |
1283 Label register_finalizer; | |
1284 Register t = rsi; | |
2002 | 1285 __ load_klass(t, rax); |
4762
069ab3f976d3
7118863: Move sizeof(klassOopDesc) into the *Klass::*_offset_in_bytes() functions
stefank
parents:
4048
diff
changeset
|
1286 __ movl(t, Address(t, Klass::access_flags_offset())); |
0 | 1287 __ testl(t, JVM_ACC_HAS_FINALIZER); |
1288 __ jcc(Assembler::notZero, register_finalizer); | |
1289 __ ret(0); | |
1290 | |
1291 __ bind(register_finalizer); | |
1292 __ enter(); | |
1293 OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */); | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1294 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax); |
0 | 1295 oop_maps = new OopMapSet(); |
1296 oop_maps->add_gc_map(call_offset, oop_map); | |
1297 | |
1298 // Now restore all the live registers | |
1299 restore_live_registers(sasm); | |
1300 | |
1301 __ leave(); | |
1302 __ ret(0); | |
1303 } | |
1304 break; | |
1305 | |
1306 case throw_range_check_failed_id: | |
1307 { StubFrame f(sasm, "range_check_failed", dont_gc_arguments); | |
1308 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); | |
1309 } | |
1310 break; | |
1311 | |
1312 case throw_index_exception_id: | |
1313 { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments); | |
1314 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); | |
1315 } | |
1316 break; | |
1317 | |
1318 case throw_div0_exception_id: | |
1319 { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments); | |
1320 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); | |
1321 } | |
1322 break; | |
1323 | |
1324 case throw_null_pointer_exception_id: | |
1325 { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments); | |
1326 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); | |
1327 } | |
1328 break; | |
1329 | |
1330 case handle_exception_nofpu_id: | |
1331 case handle_exception_id: | |
1332 { StubFrame f(sasm, "handle_exception", dont_gc_arguments); | |
2321
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
1333 oop_maps = generate_handle_exception(id, sasm); |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
1334 } |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
1335 break; |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
1336 |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
1337 case handle_exception_from_callee_id: |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
1338 { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); |
1b4e6a5d98e0
7012914: JSR 292 MethodHandlesTest C1: frame::verify_return_pc(return_address) failed: must be a return pc
twisti
parents:
2168
diff
changeset
|
1339 oop_maps = generate_handle_exception(id, sasm); |
0 | 1340 } |
1341 break; | |
1342 | |
1343 case unwind_exception_id: | |
1344 { __ set_info("unwind_exception", dont_gc_arguments); | |
1345 // note: no stubframe since we are about to leave the current | |
1346 // activation and we are calling a leaf VM function only. | |
1347 generate_unwind_exception(sasm); | |
1348 } | |
1349 break; | |
1350 | |
1351 case throw_array_store_exception_id: | |
1352 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments); | |
1353 // tos + 0: link | |
1354 // + 1: return address | |
2168
e4fee0bdaa85
7008809: should report the class in ArrayStoreExceptions from compiled code
never
parents:
2100
diff
changeset
|
1355 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true); |
0 | 1356 } |
1357 break; | |
1358 | |
1359 case throw_class_cast_exception_id: | |
1360 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments); | |
1361 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); | |
1362 } | |
1363 break; | |
1364 | |
1365 case throw_incompatible_class_change_error_id: | |
1366 { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments); | |
1367 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); | |
1368 } | |
1369 break; | |
1370 | |
1371 case slow_subtype_check_id: | |
1372 { | |
644
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1373 // Typical calling sequence: |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1374 // __ push(klass_RInfo); // object klass or other subclass |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1375 // __ push(sup_k_RInfo); // array element klass or other superclass |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1376 // __ call(slow_subtype_check); |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1377 // Note that the subclass is pushed first, and is therefore deepest. |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1378 // Previous versions of this code reversed the names 'sub' and 'super'. |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1379 // This was operationally harmless but made the code unreadable. |
0 | 1380 enum layout { |
304 | 1381 rax_off, SLOT2(raxH_off) |
1382 rcx_off, SLOT2(rcxH_off) | |
1383 rsi_off, SLOT2(rsiH_off) | |
1384 rdi_off, SLOT2(rdiH_off) | |
1385 // saved_rbp_off, SLOT2(saved_rbpH_off) | |
1386 return_off, SLOT2(returnH_off) | |
644
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1387 sup_k_off, SLOT2(sup_kH_off) |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1388 klass_off, SLOT2(superH_off) |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1389 framesize, |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1390 result_off = klass_off // deepest argument is also the return value |
0 | 1391 }; |
1392 | |
1393 __ set_info("slow_subtype_check", dont_gc_arguments); | |
304 | 1394 __ push(rdi); |
1395 __ push(rsi); | |
1396 __ push(rcx); | |
1397 __ push(rax); | |
0 | 1398 |
304 | 1399 // This is called by pushing args and not with C abi |
644
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1400 __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1401 __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass |
0 | 1402 |
1403 Label miss; | |
644
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1404 __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss); |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1405 |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1406 // fallthrough on success: |
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1407 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result |
304 | 1408 __ pop(rax); |
1409 __ pop(rcx); | |
1410 __ pop(rsi); | |
1411 __ pop(rdi); | |
0 | 1412 __ ret(0); |
1413 | |
1414 __ bind(miss); | |
644
c517646eef23
6813212: factor duplicated assembly code for general subclass check (for 6655638)
jrose
parents:
533
diff
changeset
|
1415 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result |
304 | 1416 __ pop(rax); |
1417 __ pop(rcx); | |
1418 __ pop(rsi); | |
1419 __ pop(rdi); | |
0 | 1420 __ ret(0); |
1421 } | |
1422 break; | |
1423 | |
1424 case monitorenter_nofpu_id: | |
1425 save_fpu_registers = false; | |
1426 // fall through | |
1427 case monitorenter_id: | |
1428 { | |
1429 StubFrame f(sasm, "monitorenter", dont_gc_arguments); | |
1430 OopMap* map = save_live_registers(sasm, 3, save_fpu_registers); | |
1431 | |
304 | 1432 // Called with store_parameter and not C abi |
1433 | |
0 | 1434 f.load_argument(1, rax); // rax,: object |
1435 f.load_argument(0, rbx); // rbx,: lock address | |
1436 | |
1437 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx); | |
1438 | |
1439 oop_maps = new OopMapSet(); | |
1440 oop_maps->add_gc_map(call_offset, map); | |
1441 restore_live_registers(sasm, save_fpu_registers); | |
1442 } | |
1443 break; | |
1444 | |
1445 case monitorexit_nofpu_id: | |
1446 save_fpu_registers = false; | |
1447 // fall through | |
1448 case monitorexit_id: | |
1449 { | |
1450 StubFrame f(sasm, "monitorexit", dont_gc_arguments); | |
1451 OopMap* map = save_live_registers(sasm, 2, save_fpu_registers); | |
1452 | |
304 | 1453 // Called with store_parameter and not C abi |
1454 | |
0 | 1455 f.load_argument(0, rax); // rax,: lock address |
1456 | |
1457 // note: really a leaf routine but must setup last java sp | |
1458 // => use call_RT for now (speed can be improved by | |
1459 // doing last java sp setup manually) | |
1460 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax); | |
1461 | |
1462 oop_maps = new OopMapSet(); | |
1463 oop_maps->add_gc_map(call_offset, map); | |
1464 restore_live_registers(sasm, save_fpu_registers); | |
4048
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1465 } |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1466 break; |
0 | 1467 |
4048
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1468 case deoptimize_id: |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1469 { |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1470 StubFrame f(sasm, "deoptimize", dont_gc_arguments); |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1471 const int num_rt_args = 1; // thread |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1472 OopMap* oop_map = save_live_registers(sasm, num_rt_args); |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1473 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize)); |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1474 oop_maps = new OopMapSet(); |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1475 oop_maps->add_gc_map(call_offset, oop_map); |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1476 restore_live_registers(sasm); |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1477 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1478 assert(deopt_blob != NULL, "deoptimization blob must have been created"); |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1479 __ leave(); |
cec1757a0134
7102657: JSR 292: C1 deoptimizes unlinked invokedynamic call sites infinitely
twisti
parents:
3899
diff
changeset
|
1480 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); |
0 | 1481 } |
1482 break; | |
1483 | |
1484 case access_field_patching_id: | |
1485 { StubFrame f(sasm, "access_field_patching", dont_gc_arguments); | |
1486 // we should set up register map | |
1487 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); | |
1488 } | |
1489 break; | |
1490 | |
1491 case load_klass_patching_id: | |
1492 { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments); | |
1493 // we should set up register map | |
1494 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); | |
1495 } | |
1496 break; | |
1497 | |
6725
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1498 case load_mirror_patching_id: |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1499 { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1500 // we should set up register map |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1501 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1502 } |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1503 break; |
da91efe96a93
6964458: Reimplement class meta-data storage to use native memory
coleenp
parents:
5904
diff
changeset
|
1504 |
12160
f98f5d48f511
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
roland
parents:
8860
diff
changeset
|
1505 case load_appendix_patching_id: |
f98f5d48f511
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
roland
parents:
8860
diff
changeset
|
1506 { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments); |
f98f5d48f511
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
roland
parents:
8860
diff
changeset
|
1507 // we should set up register map |
f98f5d48f511
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
roland
parents:
8860
diff
changeset
|
1508 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); |
f98f5d48f511
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
roland
parents:
8860
diff
changeset
|
1509 } |
f98f5d48f511
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
roland
parents:
8860
diff
changeset
|
1510 break; |
f98f5d48f511
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
roland
parents:
8860
diff
changeset
|
1511 |
0 | 1512 case dtrace_object_alloc_id: |
1513 { // rax,: object | |
1514 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); | |
1515 // we can't gc here so skip the oopmap but make sure that all | |
1516 // the live registers get saved. | |
1517 save_live_registers(sasm, 1); | |
1518 | |
304 | 1519 __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax)); |
0 | 1520 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc))); |
304 | 1521 NOT_LP64(__ pop(rax)); |
0 | 1522 |
1523 restore_live_registers(sasm); | |
1524 } | |
1525 break; | |
1526 | |
1527 case fpu2long_stub_id: | |
1528 { | |
1529 // rax, and rdx are destroyed, but should be free since the result is returned there | |
1530 // preserve rsi,ecx | |
304 | 1531 __ push(rsi); |
1532 __ push(rcx); | |
1533 LP64_ONLY(__ push(rdx);) | |
0 | 1534 |
1535 // check for NaN | |
1536 Label return0, do_return, return_min_jlong, do_convert; | |
1537 | |
304 | 1538 Address value_high_word(rsp, wordSize + 4); |
1539 Address value_low_word(rsp, wordSize); | |
1540 Address result_high_word(rsp, 3*wordSize + 4); | |
1541 Address result_low_word(rsp, 3*wordSize); | |
0 | 1542 |
304 | 1543 __ subptr(rsp, 32); // more than enough on 32bit |
0 | 1544 __ fst_d(value_low_word); |
1545 __ movl(rax, value_high_word); | |
1546 __ andl(rax, 0x7ff00000); | |
1547 __ cmpl(rax, 0x7ff00000); | |
1548 __ jcc(Assembler::notEqual, do_convert); | |
1549 __ movl(rax, value_high_word); | |
1550 __ andl(rax, 0xfffff); | |
1551 __ orl(rax, value_low_word); | |
1552 __ jcc(Assembler::notZero, return0); | |
1553 | |
1554 __ bind(do_convert); | |
1555 __ fnstcw(Address(rsp, 0)); | |
304 | 1556 __ movzwl(rax, Address(rsp, 0)); |
0 | 1557 __ orl(rax, 0xc00); |
1558 __ movw(Address(rsp, 2), rax); | |
1559 __ fldcw(Address(rsp, 2)); | |
1560 __ fwait(); | |
1561 __ fistp_d(result_low_word); | |
1562 __ fldcw(Address(rsp, 0)); | |
1563 __ fwait(); | |
304 | 1564 // This gets the entire long in rax on 64bit |
1565 __ movptr(rax, result_low_word); | |
1566 // testing of high bits | |
0 | 1567 __ movl(rdx, result_high_word); |
304 | 1568 __ mov(rcx, rax); |
0 | 1569 // What the heck is the point of the next instruction??? |
1570 __ xorl(rcx, 0x0); | |
1571 __ movl(rsi, 0x80000000); | |
1572 __ xorl(rsi, rdx); | |
1573 __ orl(rcx, rsi); | |
1574 __ jcc(Assembler::notEqual, do_return); | |
1575 __ fldz(); | |
1576 __ fcomp_d(value_low_word); | |
1577 __ fnstsw_ax(); | |
304 | 1578 #ifdef _LP64 |
1579 __ testl(rax, 0x4100); // ZF & CF == 0 | |
1580 __ jcc(Assembler::equal, return_min_jlong); | |
1581 #else | |
0 | 1582 __ sahf(); |
1583 __ jcc(Assembler::above, return_min_jlong); | |
304 | 1584 #endif // _LP64 |
0 | 1585 // return max_jlong |
304 | 1586 #ifndef _LP64 |
0 | 1587 __ movl(rdx, 0x7fffffff); |
1588 __ movl(rax, 0xffffffff); | |
304 | 1589 #else |
1590 __ mov64(rax, CONST64(0x7fffffffffffffff)); | |
1591 #endif // _LP64 | |
0 | 1592 __ jmp(do_return); |
1593 | |
1594 __ bind(return_min_jlong); | |
304 | 1595 #ifndef _LP64 |
0 | 1596 __ movl(rdx, 0x80000000); |
1597 __ xorl(rax, rax); | |
304 | 1598 #else |
1599 __ mov64(rax, CONST64(0x8000000000000000)); | |
1600 #endif // _LP64 | |
0 | 1601 __ jmp(do_return); |
1602 | |
1603 __ bind(return0); | |
1604 __ fpop(); | |
304 | 1605 #ifndef _LP64 |
1606 __ xorptr(rdx,rdx); | |
1607 __ xorptr(rax,rax); | |
1608 #else | |
1609 __ xorptr(rax, rax); | |
1610 #endif // _LP64 | |
0 | 1611 |
1612 __ bind(do_return); | |
304 | 1613 __ addptr(rsp, 32); |
1614 LP64_ONLY(__ pop(rdx);) | |
1615 __ pop(rcx); | |
1616 __ pop(rsi); | |
0 | 1617 __ ret(0); |
1618 } | |
1619 break; | |
1620 | |
8001
db9981fd3124
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
jprovino
parents:
6725
diff
changeset
|
1621 #if INCLUDE_ALL_GCS |
342 | 1622 case g1_pre_barrier_slow_id: |
1623 { | |
1624 StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments); | |
1625 // arg0 : previous value of memory | |
1626 | |
1627 BarrierSet* bs = Universe::heap()->barrier_set(); | |
1628 if (bs->kind() != BarrierSet::G1SATBCTLogging) { | |
362 | 1629 __ movptr(rax, (int)id); |
342 | 1630 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); |
1631 __ should_not_reach_here(); | |
1632 break; | |
1633 } | |
362 | 1634 __ push(rax); |
1635 __ push(rdx); | |
342 | 1636 |
1637 const Register pre_val = rax; | |
362 | 1638 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); |
342 | 1639 const Register tmp = rdx; |
1640 | |
362 | 1641 NOT_LP64(__ get_thread(thread);) |
342 | 1642 |
1643 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + | |
1644 PtrQueue::byte_offset_of_active())); | |
1645 | |
1646 Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() + | |
1647 PtrQueue::byte_offset_of_index())); | |
1648 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() + | |
1649 PtrQueue::byte_offset_of_buf())); | |
1650 | |
1651 | |
1652 Label done; | |
1653 Label runtime; | |
1654 | |
1655 // Can we store original value in the thread's buffer? | |
1656 | |
362 | 1657 #ifdef _LP64 |
1572 | 1658 __ movslq(tmp, queue_index); |
362 | 1659 __ cmpq(tmp, 0); |
1660 #else | |
342 | 1661 __ cmpl(queue_index, 0); |
362 | 1662 #endif |
342 | 1663 __ jcc(Assembler::equal, runtime); |
362 | 1664 #ifdef _LP64 |
1665 __ subq(tmp, wordSize); | |
1666 __ movl(queue_index, tmp); | |
1667 __ addq(tmp, buffer); | |
1668 #else | |
342 | 1669 __ subl(queue_index, wordSize); |
1670 __ movl(tmp, buffer); | |
1671 __ addl(tmp, queue_index); | |
362 | 1672 #endif |
1673 | |
342 | 1674 // prev_val (rax) |
1675 f.load_argument(0, pre_val); | |
362 | 1676 __ movptr(Address(tmp, 0), pre_val); |
342 | 1677 __ jmp(done); |
1678 | |
1679 __ bind(runtime); | |
1572 | 1680 __ push(rcx); |
1681 #ifdef _LP64 | |
1682 __ push(r8); | |
1683 __ push(r9); | |
1684 __ push(r10); | |
1685 __ push(r11); | |
1686 # ifndef _WIN64 | |
1687 __ push(rdi); | |
1688 __ push(rsi); | |
1689 # endif | |
1690 #endif | |
342 | 1691 // load the pre-value |
1692 f.load_argument(0, rcx); | |
1693 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread); | |
1572 | 1694 #ifdef _LP64 |
1695 # ifndef _WIN64 | |
1696 __ pop(rsi); | |
1697 __ pop(rdi); | |
1698 # endif | |
1699 __ pop(r11); | |
1700 __ pop(r10); | |
1701 __ pop(r9); | |
1702 __ pop(r8); | |
1703 #endif | |
362 | 1704 __ pop(rcx); |
1572 | 1705 __ bind(done); |
342 | 1706 |
362 | 1707 __ pop(rdx); |
1708 __ pop(rax); | |
342 | 1709 } |
1710 break; | |
1711 | |
1712 case g1_post_barrier_slow_id: | |
1713 { | |
1714 StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments); | |
1715 | |
1716 | |
1717 // arg0: store_address | |
1718 Address store_addr(rbp, 2*BytesPerWord); | |
1719 | |
1720 BarrierSet* bs = Universe::heap()->barrier_set(); | |
1721 CardTableModRefBS* ct = (CardTableModRefBS*)bs; | |
13424
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1722 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); |
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1723 |
342 | 1724 Label done; |
1725 Label runtime; | |
1726 | |
13424
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1727 // At this point we know new_value is non-NULL and the new_value crosses regions. |
342 | 1728 // Must check to see if card is already dirty |
1729 | |
362 | 1730 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); |
342 | 1731 |
1732 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + | |
1733 PtrQueue::byte_offset_of_index())); | |
1734 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + | |
1735 PtrQueue::byte_offset_of_buf())); | |
1736 | |
362 | 1737 __ push(rax); |
1572 | 1738 __ push(rcx); |
342 | 1739 |
13424
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1740 const Register cardtable = rax; |
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1741 const Register card_addr = rcx; |
342 | 1742 |
362 | 1743 f.load_argument(0, card_addr); |
13424
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1744 __ shrptr(card_addr, CardTableModRefBS::card_shift); |
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1745 // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT |
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1746 // a valid address and therefore is not properly handled by the relocation code. |
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1747 __ movptr(cardtable, (intptr_t)ct->byte_map_base); |
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1748 __ addptr(card_addr, cardtable); |
362 | 1749 |
13424
61746b5f0ed3
8028109: compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java crashes in RT_Baseline
anoll
parents:
12835
diff
changeset
|
1750 NOT_LP64(__ get_thread(thread);) |
362 | 1751 |
12835
69944b868a32
8014555: G1: Memory ordering problem with Conc refinement and card marking
mgerdin
parents:
12160
diff
changeset
|
1752 __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val()); |
69944b868a32
8014555: G1: Memory ordering problem with Conc refinement and card marking
mgerdin
parents:
12160
diff
changeset
|
1753 __ jcc(Assembler::equal, done); |
69944b868a32
8014555: G1: Memory ordering problem with Conc refinement and card marking
mgerdin
parents:
12160
diff
changeset
|
1754 |
69944b868a32
8014555: G1: Memory ordering problem with Conc refinement and card marking
mgerdin
parents:
12160
diff
changeset
|
1755 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad)); |
69944b868a32
8014555: G1: Memory ordering problem with Conc refinement and card marking
mgerdin
parents:
12160
diff
changeset
|
1756 __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); |
342 | 1757 __ jcc(Assembler::equal, done); |
1758 | |
1759 // storing region crossing non-NULL, card is clean. | |
1760 // dirty card and log. | |
1761 | |
12835
69944b868a32
8014555: G1: Memory ordering problem with Conc refinement and card marking
mgerdin
parents:
12160
diff
changeset
|
1762 __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val()); |
342 | 1763 |
1764 __ cmpl(queue_index, 0); | |
1765 __ jcc(Assembler::equal, runtime); | |
1766 __ subl(queue_index, wordSize); | |
1767 | |
1768 const Register buffer_addr = rbx; | |
362 | 1769 __ push(rbx); |
1770 | |
1771 __ movptr(buffer_addr, buffer); | |
342 | 1772 |
362 | 1773 #ifdef _LP64 |
1774 __ movslq(rscratch1, queue_index); | |
1775 __ addptr(buffer_addr, rscratch1); | |
1776 #else | |
1777 __ addptr(buffer_addr, queue_index); | |
1778 #endif | |
1779 __ movptr(Address(buffer_addr, 0), card_addr); | |
1780 | |
1781 __ pop(rbx); | |
342 | 1782 __ jmp(done); |
1783 | |
1784 __ bind(runtime); | |
1572 | 1785 __ push(rdx); |
1786 #ifdef _LP64 | |
1787 __ push(r8); | |
1788 __ push(r9); | |
1789 __ push(r10); | |
1790 __ push(r11); | |
1791 # ifndef _WIN64 | |
1792 __ push(rdi); | |
1793 __ push(rsi); | |
1794 # endif | |
1795 #endif | |
342 | 1796 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread); |
1572 | 1797 #ifdef _LP64 |
1798 # ifndef _WIN64 | |
1799 __ pop(rsi); | |
1800 __ pop(rdi); | |
1801 # endif | |
1802 __ pop(r11); | |
1803 __ pop(r10); | |
1804 __ pop(r9); | |
1805 __ pop(r8); | |
1806 #endif | |
1807 __ pop(rdx); | |
1808 __ bind(done); | |
342 | 1809 |
1572 | 1810 __ pop(rcx); |
362 | 1811 __ pop(rax); |
342 | 1812 |
1813 } | |
1814 break; | |
8001
db9981fd3124
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
jprovino
parents:
6725
diff
changeset
|
1815 #endif // INCLUDE_ALL_GCS |
342 | 1816 |
8860 | 1817 case predicate_failed_trap_id: |
1818 { | |
1819 StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments); | |
1820 | |
1821 OopMap* map = save_live_registers(sasm, 1); | |
1822 | |
1823 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap)); | |
1824 oop_maps = new OopMapSet(); | |
1825 oop_maps->add_gc_map(call_offset, map); | |
1826 restore_live_registers(sasm); | |
1827 __ leave(); | |
1828 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); | |
1829 assert(deopt_blob != NULL, "deoptimization blob must have been created"); | |
1830 | |
1831 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); | |
1832 } | |
1833 break; | |
1834 | |
0 | 1835 default: |
1836 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments); | |
304 | 1837 __ movptr(rax, (int)id); |
0 | 1838 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); |
1839 __ should_not_reach_here(); | |
1840 } | |
1841 break; | |
1842 } | |
1843 return oop_maps; | |
1844 } | |
1845 | |
1846 #undef __ | |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1579
diff
changeset
|
1847 |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1579
diff
changeset
|
1848 const char *Runtime1::pd_name_for_address(address entry) { |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1579
diff
changeset
|
1849 return "<unknown function>"; |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1579
diff
changeset
|
1850 } |