Mercurial > hg > graal-compiler
annotate src/cpu/x86/vm/stubGenerator_x86_64.cpp @ 144:e3729351c946
6697534: Premature GC and invalid lgrp selection with NUMA-aware allocator.
Summary: Don't move tops of the chunks in ensure_parsability(). Handle the situation with Solaris when a machine has a locality group with no memory.
Reviewed-by: apetrusenko, jcoomes, ysr
author | iveresov |
---|---|
date | Fri, 09 May 2008 16:34:08 +0400 |
parents | b130b98db9cf |
children | d1605aabd0a1 37f87013dfd8 |
rev | line source |
---|---|
0 | 1 /* |
2 * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_stubGenerator_x86_64.cpp.incl" | |
27 | |
28 // Declaration and definition of StubGenerator (no .hpp file). | |
29 // For a more detailed description of the stub routine structure | |
30 // see the comment in stubRoutines.hpp | |
31 | |
// Shorthand: all assembler emission in this file goes through _masm.
#define __ _masm->

// Scale factor for indexing into an oop array: compressed oops are
// 4 bytes wide, uncompressed oops are 8 bytes wide.
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
// In debug builds, annotate the generated code with block comments so
// they show up in disassembly.
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

// Bind a label and (in debug builds) emit its name into the disassembly.
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

const int MXCSR_MASK = 0xFFC0;  // Mask out any pending exceptions
43 | |
44 // Stub Code definitions | |
45 | |
46 static address handle_unsafe_access() { | |
47 JavaThread* thread = JavaThread::current(); | |
48 address pc = thread->saved_exception_pc(); | |
49 // pc is the instruction which we must emulate | |
50 // doing a no-op is fine: return garbage from the load | |
51 // therefore, compute npc | |
52 address npc = Assembler::locate_next_instruction(pc); | |
53 | |
54 // request an async exception | |
55 thread->set_pending_unsafe_access_error(); | |
56 | |
57 // return address of next instruction to execute | |
58 return npc; | |
59 } | |
60 | |
// StubGenerator emits the machine code for the StubRoutines entry
// points on amd64.  Declaration and definition live together here --
// there is no .hpp file for this class (see stubRoutines.hpp).
class StubGenerator: public StubCodeGenerator {
 private:

#ifdef PRODUCT
#define inc_counter_np(counter) (0)
#else
  // Bump a VM statistics counter in place (non-product builds only).
  void inc_counter_np_(int& counter) {
    __ incrementl(ExternalAddress((address)&counter));
  }
#define inc_counter_np(counter) \
  BLOCK_COMMENT("inc_counter " #counter); \
  inc_counter_np_(counter);
#endif
74 | |
  // Call stubs are used to call Java from C
  //
  // Linux Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 methodOop
  //    c_rarg4:   (interpreter) entry point              address
  //    c_rarg5:   parameters                             intptr_t*
  //    16(rbp):   parameter size (in words)              int
  //    24(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //       ...
  //  -12 [ argument word 1      ]
  //  -11 [ saved r15            ] <--- rsp_after_call
  //  -10 [ saved r14            ]
  //   -9 [ saved r13            ]
  //   -8 [ saved r12            ]
  //   -7 [ saved rbx            ]
  //   -6 [ call wrapper         ]
  //   -5 [ result               ]
  //   -4 [ result type          ]
  //   -3 [ method               ]
  //   -2 [ entry point          ]
  //   -1 [ parameters           ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ parameter size       ]
  //    3 [ thread               ]
  //
  // Windows Arguments:
  //    c_rarg0:   call wrapper address                   address
  //    c_rarg1:   result                                 address
  //    c_rarg2:   result type                            BasicType
  //    c_rarg3:   method                                 methodOop
  //    48(rbp):   (interpreter) entry point              address
  //    56(rbp):   parameters                             intptr_t*
  //    64(rbp):   parameter size (in words)              int
  //    72(rbp):   thread                                 Thread*
  //
  //      [ return_from_Java     ] <--- rsp
  //      [ argument word n      ]
  //       ...
  //   -8 [ argument word 1      ]
  //   -7 [ saved r15            ] <--- rsp_after_call
  //   -6 [ saved r14            ]
  //   -5 [ saved r13            ]
  //   -4 [ saved r12            ]
  //   -3 [ saved rdi            ]
  //   -2 [ saved rsi            ]
  //   -1 [ saved rbx            ]
  //    0 [ saved rbp            ] <--- rbp
  //    1 [ return address       ]
  //    2 [ call wrapper         ]
  //    3 [ result               ]
  //    4 [ result type          ]
  //    5 [ method               ]
  //    6 [ entry point          ]
  //    7 [ parameters           ]
  //    8 [ parameter size       ]
  //    9 [ thread               ]
  //
  //    Windows reserves the callers stack space for arguments 1-4.
  //    We spill c_rarg0-c_rarg3 to this space.

  // Call stub stack layout word offsets from rbp.  These offsets must
  // stay in sync with the diagrams above and with
  // frame::entry_frame_after_call_words (asserted in generate_call_stub).
  enum call_stub_layout {
#ifdef _WIN64
    rsp_after_call_off = -7,
    r15_off            = rsp_after_call_off,
    r14_off            = -6,
    r13_off            = -5,
    r12_off            = -4,
    rdi_off            = -3,
    rsi_off            = -2,
    rbx_off            = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    call_wrapper_off   = 2,
    result_off         = 3,
    result_type_off    = 4,
    method_off         = 5,
    entry_point_off    = 6,
    parameters_off     = 7,
    parameter_size_off = 8,
    thread_off         = 9
#else
    rsp_after_call_off = -12,
    mxcsr_off          = rsp_after_call_off,  // MXCSR save shares the slot
    r15_off            = -11,
    r14_off            = -10,
    r13_off            = -9,
    r12_off            = -8,
    rbx_off            = -7,
    call_wrapper_off   = -6,
    result_off         = -5,
    result_type_off    = -4,
    method_off         = -3,
    entry_point_off    = -2,
    parameters_off     = -1,
    rbp_off            = 0,
    retaddr_off        = 1,
    parameter_size_off = 2,
    thread_off         = 3
#endif
  };
183 | |
  // Generate the "call stub": the fixed entry point through which the
  // VM enters Java code from C++.  Builds an entry frame per the
  // call_stub_layout diagrams above, pushes the Java arguments, calls
  // the (interpreter) entry point, and stores the result back through
  // the supplied result pointer.
  //
  // return_address is an out-parameter: it receives the pc immediately
  // after the call into Java; generate_catch_exception() jumps back to
  // it when an exception crosses the stub boundary.
  address generate_call_stub(address& return_address) {
    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
           (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
           "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper  (rbp, call_wrapper_off   * wordSize);
    const Address result        (rbp, result_off         * wordSize);
    const Address result_type   (rbp, result_type_off    * wordSize);
    const Address method        (rbp, method_off         * wordSize);
    const Address entry_point   (rbp, entry_point_off    * wordSize);
    const Address parameters    (rbp, parameters_off     * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread        (rbp, thread_off         * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();                                    // rbp-anchored frame
    __ subq(rsp, -rsp_after_call_off * wordSize);  // allocate the save area

    // save register parameters
#ifndef _WIN64
    __ movq(parameters,   c_rarg5); // parameters
    __ movq(entry_point,  c_rarg4); // entry_point
#endif

    __ movq(method,       c_rarg3); // method
    __ movl(result_type,  c_rarg2); // result type
    __ movq(result,       c_rarg1); // result
    __ movq(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function (callee-saved in the C ABI)
    __ movq(rbx_save, rbx);
    __ movq(r12_save, r12);
    __ movq(r13_save, r13);
    __ movq(r14_save, r14);
    __ movq(r15_save, r15);

#ifdef _WIN64
    // rsi/rdi are callee-saved on Windows only.
    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movq(rsi_save, rsi);
    __ movq(rdi_save, rdi);
#else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
      // Make sure MXCSR is in the VM's expected state; reload it only
      // when the control/mask bits differ from the standard value.
      Label skip_ldmx;
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);    // Only check control and mask bits
      ExternalAddress mxcsr_std(StubRoutines::amd64::mxcsr_std());
      __ cmp32(rax, mxcsr_std);
      __ jcc(Assembler::equal, skip_ldmx);
      __ ldmxcsr(mxcsr_std);
      __ bind(skip_ldmx);
    }
#endif

    // Load up thread register
    __ movq(r15_thread, thread);
    __ reinit_heapbase();   // r12 doubles as the compressed-oops heap base

#ifdef ASSERT
    // make sure we have no pending exceptions
    {
      Label L;
      __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    // Push the Java arguments (and, with the tagged-stack interpreter,
    // their tags) from the parameter array onto the stack.
    Label loop;
    __ movq(c_rarg2, parameters);       // parameter pointer
    __ movl(c_rarg1, c_rarg3);          // parameter counter is in c_rarg1
    __ BIND(loop);
    if (TaggedStackInterpreter) {
      __ movq(rax, Address(c_rarg2, 0)); // get tag
      __ addq(c_rarg2, wordSize);        // advance to next tag
      __ pushq(rax);                     // pass tag
    }
    __ movq(rax, Address(c_rarg2, 0));   // get parameter
    __ addq(c_rarg2, wordSize);          // advance to next parameter
    __ decrementl(c_rarg1);              // decrement counter
    __ pushq(rax);                       // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movq(rbx, method);                // get methodOop
    __ movq(c_rarg1, entry_point);       // get entry_point
    __ movq(r13, rsp);                   // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movq(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);   // oops are stored like longs
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ leaq(rsp, rsp_after_call);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpq(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpq(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      // NOTE(review): this jcc is never taken -- control only reaches
      // bind(S) with flags set to notEqual.  Appears to be dead code
      // (compare generate_catch_exception(), which omits it).
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: threads must correspond");
      __ bind(L);
    }
#endif

    // restore regs belonging to calling function
    __ movq(r15, r15_save);
    __ movq(r14, r14_save);
    __ movq(r13, r13_save);
    __ movq(r12, r12_save);
    __ movq(rbx, rbx_save);

#ifdef _WIN64
    __ movq(rdi, rdi_save);
    __ movq(rsi, rsi_save);
#else
    __ ldmxcsr(mxcsr_save);
#endif

    // restore rsp
    __ addq(rsp, -rsp_after_call_off * wordSize);

    // return
    __ popq(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
  }
376 | |
  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  // Note: Usually the parameters are removed by the callee. In case
  // of an exception crossing an activation frame boundary, that is
  // not the case if the callee is compiled code => need to setup the
  // rsp.
  //
  // rax: exception oop

  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    address start = __ pc();

    // same as in generate_call_stub():
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
    const Address thread        (rbp, thread_off         * wordSize);

#ifdef ASSERT
    // verify that threads correspond
    {
      Label L, S;
      __ cmpq(r15_thread, thread);
      __ jcc(Assembler::notEqual, S);
      __ get_thread(rbx);
      __ cmpq(r15_thread, rbx);
      __ jcc(Assembler::equal, L);
      __ bind(S);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif

    // set pending exception
    __ verify_oop(rax);

    // Record the exception plus the C++ file/line for debugging.
    __ movq(Address(r15_thread, Thread::pending_exception_offset()), rax);
    __ lea(rscratch1, ExternalAddress((address)__FILE__));
    __ movq(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
    __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL,
           "_call_stub_return_address must have been generated before");
    __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));

    return start;
  }
427 | |
  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Contract with Java-level exception handlers:
  // rax: exception
  // rdx: throwing pc
  //
  // NOTE: At entry of this stub, exception-pc must be on stack !!

  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into
    // Java (interpreted or compiled) code; i.e., the return address
    // becomes the throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack
    // but the exception handler will reset the stack pointer ->
    // ignore them.  A potential result in registers can be ignored as
    // well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    {
      Label L;
      __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into rbx
    __ movq(c_rarg0, Address(rsp, 0));   // pass the throwing pc
    BLOCK_COMMENT("call exception_handler_for_return_address");
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                    SharedRuntime::exception_handler_for_return_address),
                    c_rarg0);
    __ movq(rbx, rax);                   // handler address

    // setup rax & rdx, remove return address & clear pending exception
    __ popq(rdx);                        // throwing pc -> rdx
    __ movq(rax, Address(r15_thread, Thread::pending_exception_offset()));
    __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);

#ifdef ASSERT
    // make sure exception is set
    {
      Label L;
      __ testq(rax, rax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // rax: exception
    // rbx: exception handler
    // rdx: throwing pc
    __ verify_oop(rax);
    __ jmp(rbx);

    return start;
  }
496 | |
  // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest            (fixed: earlier comment wrongly said c_rarg0)
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
    address start = __ pc();

    __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
515 | |
  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest <- ex, return (orig *dest)
  address generate_atomic_xchg_ptr() {
    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
    address start = __ pc();

    // 64-bit (q-form) operations: the operands are pointer-sized.
    __ movq(rax, c_rarg0); // Copy to eax we need a return value anyhow
    __ xchgq(rax, Address(c_rarg1, 0)); // automatic LOCK
    __ ret(0);

    return start;
  }
534 | |
  // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
  //                                         jint compare_value)
  //
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    else
  //       return *dest;
  address generate_atomic_cmpxchg() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
    address start = __ pc();

    __ movl(rax, c_rarg2);          // cmpxchg compares against rax
    if ( os::is_MP() ) __ lock();   // lock prefix only needed on MP systems
    __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);                      // rax holds the old value either way

    return start;
  }
560 | |
  // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value,
  //                                              volatile jlong* dest,
  //                                              jlong compare_value)
  // Arguments :
  //    c_rarg0: exchange_value
  //    c_rarg1: dest
  //    c_rarg2: compare_value
  //
  // Result:
  //    if ( compare_value == *dest ) {
  //       *dest = exchange_value
  //       return compare_value;
  //    else
  //       return *dest;
  address generate_atomic_cmpxchg_long() {
    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
    address start = __ pc();

    __ movq(rax, c_rarg2);          // cmpxchg compares against rax
    if ( os::is_MP() ) __ lock();   // lock prefix only needed on MP systems
    __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
    __ ret(0);                      // rax holds the old value either way

    return start;
  }
586 | |
  // Support for jint atomic::add(jint add_value, volatile jint* dest)
  //
  // Arguments :
  //    c_rarg0: add_value
  //    c_rarg1: dest
  //
  // Result:
  //    *dest += add_value
  //    return *dest;
  address generate_atomic_add() {
    StubCodeMark mark(this, "StubRoutines", "atomic_add");
    address start = __ pc();

    __ movl(rax, c_rarg0);
    if ( os::is_MP() ) __ lock();
    __ xaddl(Address(c_rarg1, 0), c_rarg0);  // c_rarg0 <- old *dest
    __ addl(rax, c_rarg0);                   // return old + add_value = new *dest
    __ ret(0);

    return start;
  }
608 | |
609 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) | |
610 // | |
611 // Arguments : | |
612 // c_rarg0: add_value | |
613 // c_rarg1: dest | |
614 // | |
615 // Result: | |
616 // *dest += add_value | |
617 // return *dest; | |
618 address generate_atomic_add_ptr() { | |
619 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr"); | |
620 address start = __ pc(); | |
621 | |
622 __ movq(rax, c_rarg0); // Copy to eax we need a return value anyhow | |
623 if ( os::is_MP() ) __ lock(); | |
624 __ xaddl(Address(c_rarg1, 0), c_rarg0); | |
625 __ addl(rax, c_rarg0); | |
626 __ ret(0); | |
627 | |
628 return start; | |
629 } | |
630 | |
  // Support for intptr_t OrderAccess::fence()
  //
  // Emits a full hardware memory fence (mfence) and returns.
  address generate_orderaccess_fence() {
    StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
    address start = __ pc();
    __ mfence();
    __ ret(0);

    return start;
  }
644 | |
645 // Support for intptr_t get_previous_fp() | |
646 // | |
647 // This routine is used to find the previous frame pointer for the | |
648 // caller (current_frame_guess). This is used as part of debugging | |
649 // ps() is seemingly lost trying to find frames. | |
650 // This code assumes that caller current_frame_guess) has a frame. | |
651 address generate_get_previous_fp() { | |
652 StubCodeMark mark(this, "StubRoutines", "get_previous_fp"); | |
653 const Address old_fp(rbp, 0); | |
654 const Address older_fp(rax, 0); | |
655 address start = __ pc(); | |
656 | |
657 __ enter(); | |
658 __ movq(rax, old_fp); // callers fp | |
659 __ movq(rax, older_fp); // the frame for ps() | |
660 __ popq(rbp); | |
661 __ ret(0); | |
662 | |
663 return start; | |
664 } | |
665 | |
  //----------------------------------------------------------------------------------------------------
  // Support for void verify_mxcsr()
  //
  // This routine is used with -Xcheck:jni to verify that native
  // JNI code does not return to Java code without restoring the
  // MXCSR register to our expected state.

  address generate_verify_mxcsr() {
    StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
    address start = __ pc();

    const Address mxcsr_save(rsp, 0);   // temp slot allocated below

    if (CheckJNICalls) {
      Label ok_ret;
      __ pushq(rax);                    // preserve rax across the check
      __ subq(rsp, wordSize);           // allocate a temp location
      __ stmxcsr(mxcsr_save);
      __ movl(rax, mxcsr_save);
      __ andl(rax, MXCSR_MASK);         // Only check control and mask bits
      __ cmpl(rax, *(int *)(StubRoutines::amd64::mxcsr_std()));
      __ jcc(Assembler::equal, ok_ret);

      __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");

      // Restore the VM's standard MXCSR before returning to Java.
      __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));

      __ bind(ok_ret);
      __ addq(rsp, wordSize);
      __ popq(rax);
    }

    __ ret(0);

    return start;
  }
702 | |
  // Fixup for float->int conversion.  Presumably reached when
  // cvttss2si produced the "integer indefinite" value (NaN or
  // out-of-range input) -- TODO(review): confirm against callers.
  // Rewrites the in/out stack slot with the Java-mandated result:
  // NaN -> 0, negative overflow -> min_jint, positive -> max_jint.
  address generate_f2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves

    address start = __ pc();

    Label L;

    // preserve scratch registers
    __ pushq(rax);
    __ pushq(c_rarg3);
    __ pushq(c_rarg2);
    __ pushq(c_rarg1);

    __ movl(rax, 0x7f800000);       // float exponent mask (+infinity bits)
    __ xorl(c_rarg3, c_rarg3);      // default result: 0
    __ movl(c_rarg2, inout);        // raw float bits
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);   // |bits| (clear sign)
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
    __ movl(c_rarg3, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movq(inout, c_rarg3);

    __ popq(c_rarg1);
    __ popq(c_rarg2);
    __ popq(c_rarg3);
    __ popq(rax);

    __ ret(0);

    return start;
  }
740 | |
  // Fixup for float->long conversion: same scheme as f2i_fixup above,
  // but produces min_jlong/max_jlong for signed/unsigned overflow.
  address generate_f2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
    Address inout(rsp, 5 * wordSize); // return address + 4 saves
    address start = __ pc();

    Label L;

    // preserve scratch registers
    __ pushq(rax);
    __ pushq(c_rarg3);
    __ pushq(c_rarg2);
    __ pushq(c_rarg1);

    __ movl(rax, 0x7f800000);       // float exponent mask (+infinity bits)
    __ xorl(c_rarg3, c_rarg3);      // default result: 0
    __ movl(c_rarg2, inout);        // raw float bits
    __ movl(c_rarg1, c_rarg2);
    __ andl(c_rarg1, 0x7fffffff);   // |bits| (clear sign)
    __ cmpl(rax, c_rarg1); // NaN? -> 0
    __ jcc(Assembler::negative, L);
    __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg3, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg3, rax);

    __ bind(L);
    __ movq(inout, c_rarg3);

    __ popq(c_rarg1);
    __ popq(c_rarg2);
    __ popq(c_rarg3);
    __ popq(rax);

    __ ret(0);

    return start;
  }
777 | |
  // Fixup for double->int conversion: like f2i_fixup, but NaN
  // detection must combine the double's high word with a
  // "low-word-nonzero" bit, since only the high word carries the
  // exponent.
  address generate_d2i_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    // preserve scratch registers
    __ pushq(rax);
    __ pushq(c_rarg3);
    __ pushq(c_rarg2);
    __ pushq(c_rarg1);
    __ pushq(c_rarg0);

    __ movl(rax, 0x7ff00000);       // double exponent mask (high word)
    __ movq(c_rarg2, inout);        // raw double bits
    __ movl(c_rarg3, c_rarg2);      // low 32 bits
    __ movq(c_rarg1, c_rarg2);
    __ movq(c_rarg0, c_rarg2);      // keep full bits for the sign test below
    __ negl(c_rarg3);
    __ shrq(c_rarg1, 0x20);         // high 32 bits
    __ orl(c_rarg3, c_rarg2);       // sign bit of c_rarg3 <- (low word != 0)
    __ andl(c_rarg1, 0x7fffffff);   // |high| (clear sign)
    __ xorl(c_rarg2, c_rarg2);      // default result: 0
    __ shrl(c_rarg3, 0x1f);         // 1 iff low word non-zero
    __ orl(c_rarg1, c_rarg3);       // fold low-word-nonzero into |high|
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
    __ movl(c_rarg2, 0x80000000);
    __ movl(rax, 0x7fffffff);
    __ cmovl(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ popq(c_rarg0);
    __ popq(c_rarg1);
    __ popq(c_rarg2);
    __ popq(c_rarg3);
    __ popq(rax);

    __ ret(0);

    return start;
  }
824 | |
  // Fixup for double->long conversion: identical NaN/overflow scheme
  // to d2i_fixup above, but produces min_jlong/max_jlong.
  address generate_d2l_fixup() {
    StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
    Address inout(rsp, 6 * wordSize); // return address + 5 saves

    address start = __ pc();

    Label L;

    // preserve scratch registers
    __ pushq(rax);
    __ pushq(c_rarg3);
    __ pushq(c_rarg2);
    __ pushq(c_rarg1);
    __ pushq(c_rarg0);

    __ movl(rax, 0x7ff00000);       // double exponent mask (high word)
    __ movq(c_rarg2, inout);        // raw double bits
    __ movl(c_rarg3, c_rarg2);      // low 32 bits
    __ movq(c_rarg1, c_rarg2);
    __ movq(c_rarg0, c_rarg2);      // keep full bits for the sign test below
    __ negl(c_rarg3);
    __ shrq(c_rarg1, 0x20);         // high 32 bits
    __ orl(c_rarg3, c_rarg2);       // sign bit of c_rarg3 <- (low word != 0)
    __ andl(c_rarg1, 0x7fffffff);   // |high| (clear sign)
    __ xorl(c_rarg2, c_rarg2);      // default result: 0
    __ shrl(c_rarg3, 0x1f);         // 1 iff low word non-zero
    __ orl(c_rarg1, c_rarg3);       // fold low-word-nonzero into |high|
    __ cmpl(rax, c_rarg1);
    __ jcc(Assembler::negative, L); // NaN -> 0
    __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
    __ mov64(c_rarg2, 0x8000000000000000);
    __ mov64(rax, 0x7fffffffffffffff);
    __ cmovq(Assembler::positive, c_rarg2, rax);

    __ bind(L);
    __ movq(inout, c_rarg2);

    __ popq(c_rarg0);
    __ popq(c_rarg1);
    __ popq(c_rarg2);
    __ popq(c_rarg3);
    __ popq(rax);

    __ ret(0);

    return start;
  }
871 | |
  // Emit a 16-byte, 16-byte-aligned constant with `mask` replicated in
  // both 64-bit halves -- presumably used as a 128-bit SSE operand
  // (e.g. for sign/abs masking); confirm against users of the stub.
  address generate_fp_mask(const char *stub_name, int64_t mask) {
    StubCodeMark mark(this, "StubRoutines", stub_name);

    __ align(16);
    address start = __ pc();

    __ emit_data64( mask, relocInfo::none );
    __ emit_data64( mask, relocInfo::none );

    return start;
  }
883 | |
  // The following routine generates a subroutine to throw an
  // asynchronous UnknownError when an unsafe access gets a fault that
  // could not be reasonably prevented by the programmer.  (Example:
  // SIGBUS/OBJERR.)
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    address start = __ pc();

    __ pushq(0);                       // hole for return address-to-be
    __ pushaq();                       // push registers
    // The hole sits just above the register save area.
    Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);

    // C ABI: reserve the argument-register save area around the call.
    __ subq(rsp, frame::arg_reg_save_area_bytes);
    BLOCK_COMMENT("call handle_unsafe_access");
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
    __ addq(rsp, frame::arg_reg_save_area_bytes);

    __ movq(next_pc, rax);             // stuff next address
    __ popaq();
    __ ret(0);                         // jump to next address

    return start;
  }
907 | |
  // Non-destructive plausibility checks for oops
  //
  // Arguments:
  //    all args on stack!
  //
  // Stack after saving c_rarg3:
  //    [tos + 0]: saved c_rarg3
  //    [tos + 1]: saved c_rarg2
  //    [tos + 2]: saved r12 (several TemplateTable methods use it)
  //    [tos + 3]: saved flags
  //    [tos + 4]: return address
  //  * [tos + 5]: error message (char*)
  //  * [tos + 6]: object to verify (oop)
  //  * [tos + 7]: saved rax - saved by caller and bashed
  //  * = popped on exit
  // (annotate: lines 916-921 last changed in rev 124 b130b98db9cf,
  //  "6689060: Escape Analysis does not work with Compressed Oops", kvn)
923 address generate_verify_oop() { | |
924 StubCodeMark mark(this, "StubRoutines", "verify_oop"); | |
925 address start = __ pc(); | |
926 | |
927 Label exit, error; | |
928 | |
929 __ pushfq(); | |
930 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr())); | |
931 | |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
932 __ pushq(r12); |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
933 |
0 | 934 // save c_rarg2 and c_rarg3 |
935 __ pushq(c_rarg2); | |
936 __ pushq(c_rarg3); | |
937 | |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
938 enum { |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
939 // After previous pushes. |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
940 oop_to_verify = 6 * wordSize, |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
941 saved_rax = 7 * wordSize, |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
942 |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
943 // Before the call to MacroAssembler::debug(), see below. |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
944 return_addr = 16 * wordSize, |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
945 error_msg = 17 * wordSize |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
946 }; |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
947 |
0 | 948 // get object |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
949 __ movq(rax, Address(rsp, oop_to_verify)); |
0 | 950 |
951 // make sure object is 'reasonable' | |
952 __ testq(rax, rax); | |
953 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK | |
954 // Check if the oop is in the right area of memory | |
955 __ movq(c_rarg2, rax); | |
956 __ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask()); | |
957 __ andq(c_rarg2, c_rarg3); | |
958 __ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits()); | |
959 __ cmpq(c_rarg2, c_rarg3); | |
960 __ jcc(Assembler::notZero, error); | |
961 | |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
962 // set r12 to heapbase for load_klass() |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
963 __ reinit_heapbase(); |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
964 |
0 | 965 // make sure klass is 'reasonable' |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
966 __ load_klass(rax, rax); // get klass |
0 | 967 __ testq(rax, rax); |
968 __ jcc(Assembler::zero, error); // if klass is NULL it is broken | |
969 // Check if the klass is in the right area of memory | |
970 __ movq(c_rarg2, rax); | |
971 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask()); | |
972 __ andq(c_rarg2, c_rarg3); | |
973 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits()); | |
974 __ cmpq(c_rarg2, c_rarg3); | |
975 __ jcc(Assembler::notZero, error); | |
976 | |
977 // make sure klass' klass is 'reasonable' | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
978 __ load_klass(rax, rax); |
0 | 979 __ testq(rax, rax); |
980 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken | |
981 // Check if the klass' klass is in the right area of memory | |
982 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask()); | |
983 __ andq(rax, c_rarg3); | |
984 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits()); | |
985 __ cmpq(rax, c_rarg3); | |
986 __ jcc(Assembler::notZero, error); | |
987 | |
988 // return if everything seems ok | |
989 __ bind(exit); | |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
990 __ movq(rax, Address(rsp, saved_rax)); // get saved rax back |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
991 __ popq(c_rarg3); // restore c_rarg3 |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
992 __ popq(c_rarg2); // restore c_rarg2 |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
993 __ popq(r12); // restore r12 |
0 | 994 __ popfq(); // restore flags |
995 __ ret(3 * wordSize); // pop caller saved stuff | |
996 | |
997 // handle errors | |
998 __ bind(error); | |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
999 __ movq(rax, Address(rsp, saved_rax)); // get saved rax back |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1000 __ popq(c_rarg3); // get saved c_rarg3 back |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1001 __ popq(c_rarg2); // get saved c_rarg2 back |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1002 __ popq(r12); // get saved r12 back |
0 | 1003 __ popfq(); // get saved flags off stack -- |
1004 // will be ignored | |
1005 | |
1006 __ pushaq(); // push registers | |
1007 // (rip is already | |
1008 // already pushed) | |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1009 // debug(char* msg, int64_t pc, int64_t regs[]) |
0 | 1010 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and |
1011 // pushed all the registers, so now the stack looks like: | |
1012 // [tos + 0] 16 saved registers | |
1013 // [tos + 16] return address | |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1014 // * [tos + 17] error message (char*) |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1015 // * [tos + 18] object to verify (oop) |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1016 // * [tos + 19] saved rax - saved by caller and bashed |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1017 // * = popped on exit |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1018 |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1019 __ movq(c_rarg0, Address(rsp, error_msg)); // pass address of error message |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1020 __ movq(c_rarg1, Address(rsp, return_addr)); // pass return address |
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1021 __ movq(c_rarg2, rsp); // pass address of regs on stack |
0 | 1022 __ movq(r12, rsp); // remember rsp |
1023 __ subq(rsp, frame::arg_reg_save_area_bytes);// windows | |
1024 __ andq(rsp, -16); // align stack as required by ABI | |
1025 BLOCK_COMMENT("call MacroAssembler::debug"); | |
1026 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug))); | |
1027 __ movq(rsp, r12); // restore rsp | |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1028 __ popaq(); // pop registers (includes r12) |
0 | 1029 __ ret(3 * wordSize); // pop caller saved stuff |
1030 | |
1031 return start; | |
1032 } | |
1033 | |
1034 static address disjoint_byte_copy_entry; | |
1035 static address disjoint_short_copy_entry; | |
1036 static address disjoint_int_copy_entry; | |
1037 static address disjoint_long_copy_entry; | |
1038 static address disjoint_oop_copy_entry; | |
1039 | |
1040 static address byte_copy_entry; | |
1041 static address short_copy_entry; | |
1042 static address int_copy_entry; | |
1043 static address long_copy_entry; | |
1044 static address oop_copy_entry; | |
1045 | |
1046 static address checkcast_copy_entry; | |
1047 | |
1048 // | |
1049 // Verify that a register contains clean 32-bits positive value | |
1050 // (high 32-bits are 0) so it could be used in 64-bits shifts. | |
1051 // | |
1052 // Input: | |
1053 // Rint - 32-bits value | |
1054 // Rtmp - scratch | |
1055 // | |
1056 void assert_clean_int(Register Rint, Register Rtmp) { | |
1057 #ifdef ASSERT | |
1058 Label L; | |
1059 assert_different_registers(Rtmp, Rint); | |
1060 __ movslq(Rtmp, Rint); | |
1061 __ cmpq(Rtmp, Rint); | |
124
b130b98db9cf
6689060: Escape Analysis does not work with Compressed Oops
kvn
parents:
113
diff
changeset
|
1062 __ jcc(Assembler::equal, L); |
0 | 1063 __ stop("high 32-bits of int value are not 0"); |
1064 __ bind(L); | |
1065 #endif | |
1066 } | |
1067 | |
1068 // Generate overlap test for array copy stubs | |
1069 // | |
1070 // Input: | |
1071 // c_rarg0 - from | |
1072 // c_rarg1 - to | |
1073 // c_rarg2 - element count | |
1074 // | |
1075 // Output: | |
1076 // rax - &from[element count - 1] | |
1077 // | |
1078 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) { | |
1079 assert(no_overlap_target != NULL, "must be generated"); | |
1080 array_overlap_test(no_overlap_target, NULL, sf); | |
1081 } | |
1082 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) { | |
1083 array_overlap_test(NULL, &L_no_overlap, sf); | |
1084 } | |
1085 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) { | |
1086 const Register from = c_rarg0; | |
1087 const Register to = c_rarg1; | |
1088 const Register count = c_rarg2; | |
1089 const Register end_from = rax; | |
1090 | |
1091 __ cmpq(to, from); | |
1092 __ leaq(end_from, Address(from, count, sf, 0)); | |
1093 if (NOLp == NULL) { | |
1094 ExternalAddress no_overlap(no_overlap_target); | |
1095 __ jump_cc(Assembler::belowEqual, no_overlap); | |
1096 __ cmpq(to, end_from); | |
1097 __ jump_cc(Assembler::aboveEqual, no_overlap); | |
1098 } else { | |
1099 __ jcc(Assembler::belowEqual, (*NOLp)); | |
1100 __ cmpq(to, end_from); | |
1101 __ jcc(Assembler::aboveEqual, (*NOLp)); | |
1102 } | |
1103 } | |
1104 | |
1105 // Shuffle first three arg regs on Windows into Linux/Solaris locations. | |
1106 // | |
1107 // Outputs: | |
1108 // rdi - rcx | |
1109 // rsi - rdx | |
1110 // rdx - r8 | |
1111 // rcx - r9 | |
1112 // | |
1113 // Registers r9 and r10 are used to save rdi and rsi on Windows, which latter | |
1114 // are non-volatile. r9 and r10 should not be used by the caller. | |
1115 // | |
1116 void setup_arg_regs(int nargs = 3) { | |
1117 const Register saved_rdi = r9; | |
1118 const Register saved_rsi = r10; | |
1119 assert(nargs == 3 || nargs == 4, "else fix"); | |
1120 #ifdef _WIN64 | |
1121 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9, | |
1122 "unexpected argument registers"); | |
1123 if (nargs >= 4) | |
1124 __ movq(rax, r9); // r9 is also saved_rdi | |
1125 __ movq(saved_rdi, rdi); | |
1126 __ movq(saved_rsi, rsi); | |
1127 __ movq(rdi, rcx); // c_rarg0 | |
1128 __ movq(rsi, rdx); // c_rarg1 | |
1129 __ movq(rdx, r8); // c_rarg2 | |
1130 if (nargs >= 4) | |
1131 __ movq(rcx, rax); // c_rarg3 (via rax) | |
1132 #else | |
1133 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx, | |
1134 "unexpected argument registers"); | |
1135 #endif | |
1136 } | |
1137 | |
1138 void restore_arg_regs() { | |
1139 const Register saved_rdi = r9; | |
1140 const Register saved_rsi = r10; | |
1141 #ifdef _WIN64 | |
1142 __ movq(rdi, saved_rdi); | |
1143 __ movq(rsi, saved_rsi); | |
1144 #endif | |
1145 } | |
1146 | |
1147 // Generate code for an array write pre barrier | |
1148 // | |
1149 // addr - starting address | |
1150 // count - element count | |
1151 // | |
1152 // Destroy no registers! | |
1153 // | |
1154 void gen_write_ref_array_pre_barrier(Register addr, Register count) { | |
1155 #if 0 // G1 - only | |
1156 assert_different_registers(addr, c_rarg1); | |
1157 assert_different_registers(count, c_rarg0); | |
1158 BarrierSet* bs = Universe::heap()->barrier_set(); | |
1159 switch (bs->kind()) { | |
1160 case BarrierSet::G1SATBCT: | |
1161 case BarrierSet::G1SATBCTLogging: | |
1162 { | |
1163 __ pushaq(); // push registers | |
1164 __ movq(c_rarg0, addr); | |
1165 __ movq(c_rarg1, count); | |
1166 __ call(RuntimeAddress(BarrierSet::static_write_ref_array_pre)); | |
1167 __ popaq(); | |
1168 } | |
1169 break; | |
1170 case BarrierSet::CardTableModRef: | |
1171 case BarrierSet::CardTableExtension: | |
1172 case BarrierSet::ModRef: | |
1173 break; | |
1174 default : | |
1175 ShouldNotReachHere(); | |
1176 | |
1177 } | |
1178 #endif // 0 G1 - only | |
1179 } | |
1180 | |
1181 // | |
1182 // Generate code for an array write post barrier | |
1183 // | |
1184 // Input: | |
1185 // start - register containing starting address of destination array | |
1186 // end - register containing ending address of destination array | |
1187 // scratch - scratch register | |
1188 // | |
1189 // The input registers are overwritten. | |
1190 // The ending address is inclusive. | |
1191 void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) { | |
1192 assert_different_registers(start, end, scratch); | |
1193 BarrierSet* bs = Universe::heap()->barrier_set(); | |
1194 switch (bs->kind()) { | |
1195 #if 0 // G1 - only | |
1196 case BarrierSet::G1SATBCT: | |
1197 case BarrierSet::G1SATBCTLogging: | |
1198 | |
1199 { | |
1200 __ pushaq(); // push registers (overkill) | |
1201 // must compute element count unless barrier set interface is changed (other platforms supply count) | |
1202 assert_different_registers(start, end, scratch); | |
1203 __ leaq(scratch, Address(end, wordSize)); | |
1204 __ subq(scratch, start); | |
1205 __ shrq(scratch, LogBytesPerWord); | |
1206 __ movq(c_rarg0, start); | |
1207 __ movq(c_rarg1, scratch); | |
1208 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)); | |
1209 __ popaq(); | |
1210 } | |
1211 break; | |
1212 #endif // 0 G1 - only | |
1213 case BarrierSet::CardTableModRef: | |
1214 case BarrierSet::CardTableExtension: | |
1215 { | |
1216 CardTableModRefBS* ct = (CardTableModRefBS*)bs; | |
1217 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); | |
1218 | |
1219 Label L_loop; | |
1220 | |
1221 __ shrq(start, CardTableModRefBS::card_shift); | |
1222 __ shrq(end, CardTableModRefBS::card_shift); | |
1223 __ subq(end, start); // number of bytes to copy | |
1224 | |
1225 const Register count = end; // 'end' register contains bytes count now | |
1226 __ lea(scratch, ExternalAddress((address)ct->byte_map_base)); | |
1227 __ addq(start, scratch); | |
1228 __ BIND(L_loop); | |
1229 __ movb(Address(start, count, Address::times_1), 0); | |
1230 __ decrementq(count); | |
1231 __ jcc(Assembler::greaterEqual, L_loop); | |
1232 } | |
1233 } | |
1234 } | |
1235 | |
1236 // Copy big chunks forward | |
1237 // | |
1238 // Inputs: | |
1239 // end_from - source arrays end address | |
1240 // end_to - destination array end address | |
1241 // qword_count - 64-bits element count, negative | |
1242 // to - scratch | |
1243 // L_copy_32_bytes - entry label | |
1244 // L_copy_8_bytes - exit label | |
1245 // | |
1246 void copy_32_bytes_forward(Register end_from, Register end_to, | |
1247 Register qword_count, Register to, | |
1248 Label& L_copy_32_bytes, Label& L_copy_8_bytes) { | |
1249 DEBUG_ONLY(__ stop("enter at entry label, not here")); | |
1250 Label L_loop; | |
1251 __ align(16); | |
1252 __ BIND(L_loop); | |
1253 __ movq(to, Address(end_from, qword_count, Address::times_8, -24)); | |
1254 __ movq(Address(end_to, qword_count, Address::times_8, -24), to); | |
1255 __ movq(to, Address(end_from, qword_count, Address::times_8, -16)); | |
1256 __ movq(Address(end_to, qword_count, Address::times_8, -16), to); | |
1257 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8)); | |
1258 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to); | |
1259 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0)); | |
1260 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to); | |
1261 __ BIND(L_copy_32_bytes); | |
1262 __ addq(qword_count, 4); | |
1263 __ jcc(Assembler::lessEqual, L_loop); | |
1264 __ subq(qword_count, 4); | |
1265 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords | |
1266 } | |
1267 | |
1268 | |
1269 // Copy big chunks backward | |
1270 // | |
1271 // Inputs: | |
1272 // from - source arrays address | |
1273 // dest - destination array address | |
1274 // qword_count - 64-bits element count | |
1275 // to - scratch | |
1276 // L_copy_32_bytes - entry label | |
1277 // L_copy_8_bytes - exit label | |
1278 // | |
1279 void copy_32_bytes_backward(Register from, Register dest, | |
1280 Register qword_count, Register to, | |
1281 Label& L_copy_32_bytes, Label& L_copy_8_bytes) { | |
1282 DEBUG_ONLY(__ stop("enter at entry label, not here")); | |
1283 Label L_loop; | |
1284 __ align(16); | |
1285 __ BIND(L_loop); | |
1286 __ movq(to, Address(from, qword_count, Address::times_8, 24)); | |
1287 __ movq(Address(dest, qword_count, Address::times_8, 24), to); | |
1288 __ movq(to, Address(from, qword_count, Address::times_8, 16)); | |
1289 __ movq(Address(dest, qword_count, Address::times_8, 16), to); | |
1290 __ movq(to, Address(from, qword_count, Address::times_8, 8)); | |
1291 __ movq(Address(dest, qword_count, Address::times_8, 8), to); | |
1292 __ movq(to, Address(from, qword_count, Address::times_8, 0)); | |
1293 __ movq(Address(dest, qword_count, Address::times_8, 0), to); | |
1294 __ BIND(L_copy_32_bytes); | |
1295 __ subq(qword_count, 4); | |
1296 __ jcc(Assembler::greaterEqual, L_loop); | |
1297 __ addq(qword_count, 4); | |
1298 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords | |
1299 } | |
1300 | |
1301 | |
1302 // Arguments: | |
1303 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary | |
1304 // ignored | |
1305 // name - stub name string | |
1306 // | |
1307 // Inputs: | |
1308 // c_rarg0 - source array address | |
1309 // c_rarg1 - destination array address | |
1310 // c_rarg2 - element count, treated as ssize_t, can be zero | |
1311 // | |
1312 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, | |
1313 // we let the hardware handle it. The one to eight bytes within words, | |
1314 // dwords or qwords that span cache line boundaries will still be loaded | |
1315 // and stored atomically. | |
1316 // | |
1317 // Side Effects: | |
1318 // disjoint_byte_copy_entry is set to the no-overlap entry point | |
1319 // used by generate_conjoint_byte_copy(). | |
1320 // | |
1321 address generate_disjoint_byte_copy(bool aligned, const char *name) { | |
1322 __ align(CodeEntryAlignment); | |
1323 StubCodeMark mark(this, "StubRoutines", name); | |
1324 address start = __ pc(); | |
1325 | |
1326 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; | |
1327 Label L_copy_byte, L_exit; | |
1328 const Register from = rdi; // source array address | |
1329 const Register to = rsi; // destination array address | |
1330 const Register count = rdx; // elements count | |
1331 const Register byte_count = rcx; | |
1332 const Register qword_count = count; | |
1333 const Register end_from = from; // source array end address | |
1334 const Register end_to = to; // destination array end address | |
1335 // End pointers are inclusive, and if count is not zero they point | |
1336 // to the last unit copied: end_to[0] := end_from[0] | |
1337 | |
1338 __ enter(); // required for proper stackwalking of RuntimeStub frame | |
1339 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. | |
1340 | |
1341 disjoint_byte_copy_entry = __ pc(); | |
1342 BLOCK_COMMENT("Entry:"); | |
1343 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) | |
1344 | |
1345 setup_arg_regs(); // from => rdi, to => rsi, count => rdx | |
1346 // r9 and r10 may be used to save non-volatile registers | |
1347 | |
1348 // 'from', 'to' and 'count' are now valid | |
1349 __ movq(byte_count, count); | |
1350 __ shrq(count, 3); // count => qword_count | |
1351 | |
1352 // Copy from low to high addresses. Use 'to' as scratch. | |
1353 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); | |
1354 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); | |
1355 __ negq(qword_count); // make the count negative | |
1356 __ jmp(L_copy_32_bytes); | |
1357 | |
1358 // Copy trailing qwords | |
1359 __ BIND(L_copy_8_bytes); | |
1360 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); | |
1361 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); | |
1362 __ incrementq(qword_count); | |
1363 __ jcc(Assembler::notZero, L_copy_8_bytes); | |
1364 | |
1365 // Check for and copy trailing dword | |
1366 __ BIND(L_copy_4_bytes); | |
1367 __ testq(byte_count, 4); | |
1368 __ jccb(Assembler::zero, L_copy_2_bytes); | |
1369 __ movl(rax, Address(end_from, 8)); | |
1370 __ movl(Address(end_to, 8), rax); | |
1371 | |
1372 __ addq(end_from, 4); | |
1373 __ addq(end_to, 4); | |
1374 | |
1375 // Check for and copy trailing word | |
1376 __ BIND(L_copy_2_bytes); | |
1377 __ testq(byte_count, 2); | |
1378 __ jccb(Assembler::zero, L_copy_byte); | |
1379 __ movw(rax, Address(end_from, 8)); | |
1380 __ movw(Address(end_to, 8), rax); | |
1381 | |
1382 __ addq(end_from, 2); | |
1383 __ addq(end_to, 2); | |
1384 | |
1385 // Check for and copy trailing byte | |
1386 __ BIND(L_copy_byte); | |
1387 __ testq(byte_count, 1); | |
1388 __ jccb(Assembler::zero, L_exit); | |
1389 __ movb(rax, Address(end_from, 8)); | |
1390 __ movb(Address(end_to, 8), rax); | |
1391 | |
1392 __ BIND(L_exit); | |
1393 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); | |
1394 restore_arg_regs(); | |
1395 __ xorq(rax, rax); // return 0 | |
1396 __ leave(); // required for proper stackwalking of RuntimeStub frame | |
1397 __ ret(0); | |
1398 | |
1399 // Copy in 32-bytes chunks | |
1400 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); | |
1401 __ jmp(L_copy_4_bytes); | |
1402 | |
1403 return start; | |
1404 } | |
1405 | |
1406 // Arguments: | |
1407 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary | |
1408 // ignored | |
1409 // name - stub name string | |
1410 // | |
1411 // Inputs: | |
1412 // c_rarg0 - source array address | |
1413 // c_rarg1 - destination array address | |
1414 // c_rarg2 - element count, treated as ssize_t, can be zero | |
1415 // | |
1416 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries, | |
1417 // we let the hardware handle it. The one to eight bytes within words, | |
1418 // dwords or qwords that span cache line boundaries will still be loaded | |
1419 // and stored atomically. | |
1420 // | |
1421 address generate_conjoint_byte_copy(bool aligned, const char *name) { | |
1422 __ align(CodeEntryAlignment); | |
1423 StubCodeMark mark(this, "StubRoutines", name); | |
1424 address start = __ pc(); | |
1425 | |
1426 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes; | |
1427 const Register from = rdi; // source array address | |
1428 const Register to = rsi; // destination array address | |
1429 const Register count = rdx; // elements count | |
1430 const Register byte_count = rcx; | |
1431 const Register qword_count = count; | |
1432 | |
1433 __ enter(); // required for proper stackwalking of RuntimeStub frame | |
1434 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. | |
1435 | |
1436 byte_copy_entry = __ pc(); | |
1437 BLOCK_COMMENT("Entry:"); | |
1438 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) | |
1439 | |
1440 array_overlap_test(disjoint_byte_copy_entry, Address::times_1); | |
1441 setup_arg_regs(); // from => rdi, to => rsi, count => rdx | |
1442 // r9 and r10 may be used to save non-volatile registers | |
1443 | |
1444 // 'from', 'to' and 'count' are now valid | |
1445 __ movq(byte_count, count); | |
1446 __ shrq(count, 3); // count => qword_count | |
1447 | |
1448 // Copy from high to low addresses. | |
1449 | |
1450 // Check for and copy trailing byte | |
1451 __ testq(byte_count, 1); | |
1452 __ jcc(Assembler::zero, L_copy_2_bytes); | |
1453 __ movb(rax, Address(from, byte_count, Address::times_1, -1)); | |
1454 __ movb(Address(to, byte_count, Address::times_1, -1), rax); | |
1455 __ decrementq(byte_count); // Adjust for possible trailing word | |
1456 | |
1457 // Check for and copy trailing word | |
1458 __ BIND(L_copy_2_bytes); | |
1459 __ testq(byte_count, 2); | |
1460 __ jcc(Assembler::zero, L_copy_4_bytes); | |
1461 __ movw(rax, Address(from, byte_count, Address::times_1, -2)); | |
1462 __ movw(Address(to, byte_count, Address::times_1, -2), rax); | |
1463 | |
1464 // Check for and copy trailing dword | |
1465 __ BIND(L_copy_4_bytes); | |
1466 __ testq(byte_count, 4); | |
1467 __ jcc(Assembler::zero, L_copy_32_bytes); | |
1468 __ movl(rax, Address(from, qword_count, Address::times_8)); | |
1469 __ movl(Address(to, qword_count, Address::times_8), rax); | |
1470 __ jmp(L_copy_32_bytes); | |
1471 | |
1472 // Copy trailing qwords | |
1473 __ BIND(L_copy_8_bytes); | |
1474 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); | |
1475 __ movq(Address(to, qword_count, Address::times_8, -8), rax); | |
1476 __ decrementq(qword_count); | |
1477 __ jcc(Assembler::notZero, L_copy_8_bytes); | |
1478 | |
1479 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); | |
1480 restore_arg_regs(); | |
1481 __ xorq(rax, rax); // return 0 | |
1482 __ leave(); // required for proper stackwalking of RuntimeStub frame | |
1483 __ ret(0); | |
1484 | |
1485 // Copy in 32-bytes chunks | |
1486 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); | |
1487 | |
1488 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr); | |
1489 restore_arg_regs(); | |
1490 __ xorq(rax, rax); // return 0 | |
1491 __ leave(); // required for proper stackwalking of RuntimeStub frame | |
1492 __ ret(0); | |
1493 | |
1494 return start; | |
1495 } | |
1496 | |
1497 // Arguments: | |
1498 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary | |
1499 // ignored | |
1500 // name - stub name string | |
1501 // | |
1502 // Inputs: | |
1503 // c_rarg0 - source array address | |
1504 // c_rarg1 - destination array address | |
1505 // c_rarg2 - element count, treated as ssize_t, can be zero | |
1506 // | |
1507 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we | |
1508 // let the hardware handle it. The two or four words within dwords | |
1509 // or qwords that span cache line boundaries will still be loaded | |
1510 // and stored atomically. | |
1511 // | |
1512 // Side Effects: | |
1513 // disjoint_short_copy_entry is set to the no-overlap entry point | |
1514 // used by generate_conjoint_short_copy(). | |
1515 // | |
1516 address generate_disjoint_short_copy(bool aligned, const char *name) { | |
1517 __ align(CodeEntryAlignment); | |
1518 StubCodeMark mark(this, "StubRoutines", name); | |
1519 address start = __ pc(); | |
1520 | |
1521 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes,L_copy_2_bytes,L_exit; | |
1522 const Register from = rdi; // source array address | |
1523 const Register to = rsi; // destination array address | |
1524 const Register count = rdx; // elements count | |
1525 const Register word_count = rcx; | |
1526 const Register qword_count = count; | |
1527 const Register end_from = from; // source array end address | |
1528 const Register end_to = to; // destination array end address | |
1529 // End pointers are inclusive, and if count is not zero they point | |
1530 // to the last unit copied: end_to[0] := end_from[0] | |
1531 | |
1532 __ enter(); // required for proper stackwalking of RuntimeStub frame | |
1533 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. | |
1534 | |
1535 disjoint_short_copy_entry = __ pc(); | |
1536 BLOCK_COMMENT("Entry:"); | |
1537 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) | |
1538 | |
1539 setup_arg_regs(); // from => rdi, to => rsi, count => rdx | |
1540 // r9 and r10 may be used to save non-volatile registers | |
1541 | |
1542 // 'from', 'to' and 'count' are now valid | |
1543 __ movq(word_count, count); | |
1544 __ shrq(count, 2); // count => qword_count | |
1545 | |
1546 // Copy from low to high addresses. Use 'to' as scratch. | |
1547 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8)); | |
1548 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8)); | |
1549 __ negq(qword_count); | |
1550 __ jmp(L_copy_32_bytes); | |
1551 | |
1552 // Copy trailing qwords | |
1553 __ BIND(L_copy_8_bytes); | |
1554 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8)); | |
1555 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax); | |
1556 __ incrementq(qword_count); | |
1557 __ jcc(Assembler::notZero, L_copy_8_bytes); | |
1558 | |
1559 // Original 'dest' is trashed, so we can't use it as a | |
1560 // base register for a possible trailing word copy | |
1561 | |
1562 // Check for and copy trailing dword | |
1563 __ BIND(L_copy_4_bytes); | |
1564 __ testq(word_count, 2); | |
1565 __ jccb(Assembler::zero, L_copy_2_bytes); | |
1566 __ movl(rax, Address(end_from, 8)); | |
1567 __ movl(Address(end_to, 8), rax); | |
1568 | |
1569 __ addq(end_from, 4); | |
1570 __ addq(end_to, 4); | |
1571 | |
1572 // Check for and copy trailing word | |
1573 __ BIND(L_copy_2_bytes); | |
1574 __ testq(word_count, 1); | |
1575 __ jccb(Assembler::zero, L_exit); | |
1576 __ movw(rax, Address(end_from, 8)); | |
1577 __ movw(Address(end_to, 8), rax); | |
1578 | |
1579 __ BIND(L_exit); | |
1580 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr); | |
1581 restore_arg_regs(); | |
1582 __ xorq(rax, rax); // return 0 | |
1583 __ leave(); // required for proper stackwalking of RuntimeStub frame | |
1584 __ ret(0); | |
1585 | |
1586 // Copy in 32-bytes chunks | |
1587 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); | |
1588 __ jmp(L_copy_4_bytes); | |
1589 | |
1590 return start; | |
1591 } | |
1592 | |
1593 // Arguments: | |
1594 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary | |
1595 // ignored | |
1596 // name - stub name string | |
1597 // | |
1598 // Inputs: | |
1599 // c_rarg0 - source array address | |
1600 // c_rarg1 - destination array address | |
1601 // c_rarg2 - element count, treated as ssize_t, can be zero | |
1602 // | |
1603 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we | |
1604 // let the hardware handle it. The two or four words within dwords | |
1605 // or qwords that span cache line boundaries will still be loaded | |
1606 // and stored atomically. | |
1607 // | |
  address generate_conjoint_short_copy(bool aligned, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register word_count  = rcx;
    const Register qword_count = count;  // aliases 'count' after the shift below

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    short_copy_entry = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    // If source and destination do not actually overlap, tail-call the
    // (faster) disjoint stub instead of copying backwards.
    array_overlap_test(disjoint_short_copy_entry, Address::times_2);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'count' are now valid
    __ movq(word_count, count);
    __ shrq(count, 2); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.
    // Peel off the (up to 3) trailing words first, then the qword bulk loop.

    // Check for and copy trailing word
    __ testq(word_count, 1);
    __ jccb(Assembler::zero, L_copy_4_bytes);
    __ movw(rax, Address(from, word_count, Address::times_2, -2));
    __ movw(Address(to, word_count, Address::times_2, -2), rax);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testq(word_count, 2);
    __ jcc(Assembler::zero, L_copy_32_bytes);
    // The trailing dword sits just above the qword-aligned region, at
    // byte offset qword_count * 8.
    __ movl(rax, Address(from, qword_count, Address::times_8));
    __ movl(Address(to, qword_count, Address::times_8), rax);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords (entered from the 32-byte bulk copier below)
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrementq(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Exit path taken when the trailing-qword loop falls through.
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
    restore_arg_regs();
    __ xorq(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-bytes chunks
    copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    // Exit path taken when the 32-byte bulk copier finishes with no
    // trailing qwords.
    inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
    restore_arg_regs();
    __ xorq(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
1675 | |
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomicly.
  //
  // Side Effects:
  //   disjoint_int_copy_entry is set to the no-overlap entry point
  //   used by generate_conjoint_int_oop_copy().
  //
  address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;  // aliases 'count' after the shift below
    const Register end_from    = from;   // source array end address
    const Register end_to      = to;     // destination array end address
    const Register saved_to    = r11;    // saved destination array address
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    // Publish the no-overlap entry point (consumed by the conjoint stub's
    // array_overlap_test): oop flavor or plain-int flavor.
    (is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc();

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
    }

    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    if (is_oop) {
      // Keep the original 'to' for the post barrier: 'to' itself is
      // clobbered below as the end pointer.
      __ movq(saved_to, to);
    }

    // 'from', 'to' and 'count' are now valid
    __ movq(dword_count, count);
    __ shrq(count, 1); // count => qword_count

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
    __ leaq(end_to,   Address(to,   qword_count, Address::times_8, -8));
    // Negative index counts up toward zero so the loop test is a simple jnz.
    __ negq(qword_count);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords (entered from the 32-byte bulk copier below)
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ incrementq(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    // Check for and copy trailing dword
  __ BIND(L_copy_4_bytes);
    __ testq(dword_count, 1); // Only byte test since the value is 0 or 1
    __ jccb(Assembler::zero, L_exit);
    __ movl(rax, Address(end_from, 8));
    __ movl(Address(end_to, 8), rax);

  __ BIND(L_exit);
    if (is_oop) {
      // Recompute the inclusive end pointer from the saved base for the
      // card-marking post barrier.
      __ leaq(end_to, Address(saved_to, dword_count, Address::times_4, -4));
      gen_write_ref_array_post_barrier(saved_to, end_to, rax);
    }
    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
    restore_arg_regs();
    __ xorq(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy 32-bytes chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
    __ jmp(L_copy_4_bytes);

    return start;
  }
1773 | |
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
  // the hardware handle it.  The two dwords within qwords that span
  // cache line boundaries will still be loaded and stored atomicly.
  //
  address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register count       = rdx;  // elements count
    const Register dword_count = rcx;
    const Register qword_count = count;  // aliases 'count' after the shift below

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (is_oop) {
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
    }

    // Publish the conjoint entry point: oop flavor or plain-int flavor.
    (is_oop ? oop_copy_entry : int_copy_entry) = __ pc();
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    // If the arrays do not actually overlap, tail-call the matching
    // (faster, forward-copying) disjoint stub instead.
    array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry,
                       Address::times_4);
    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    assert_clean_int(count, rax); // Make sure 'count' is clean int.
    // 'from', 'to' and 'count' are now valid
    __ movq(dword_count, count);
    __ shrq(count, 1); // count => qword_count

    // Copy from high to low addresses.  Use 'to' as scratch.

    // Check for and copy trailing dword
    __ testq(dword_count, 1);
    __ jcc(Assembler::zero, L_copy_32_bytes);
    __ movl(rax, Address(from, dword_count, Address::times_4, -4));
    __ movl(Address(to, dword_count, Address::times_4, -4), rax);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords (entered from the 32-byte bulk copier below)
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(from, qword_count, Address::times_8, -8));
    __ movq(Address(to, qword_count, Address::times_8, -8), rax);
    __ decrementq(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
    if (is_oop) {
      // The oop flavor still needs the card-marking post barrier at L_exit.
      __ jmp(L_exit);
    }
    restore_arg_regs();
    __ xorq(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    // Copy in 32-bytes chunks
    copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
  __ bind(L_exit);
    if (is_oop) {
      // 'count' (rdx) is dead here; reuse it for the inclusive end pointer
      // required by the post barrier.
      Register end_to = rdx;
      __ leaq(end_to, Address(to, dword_count, Address::times_4, -4));
      gen_write_ref_array_post_barrier(to, end_to, rax);
    }
    restore_arg_regs();
    __ xorq(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
1865 | |
  // Arguments:
  //   aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
  //             ignored
  //   is_oop  - true => oop array, so generate store check code
  //   name    - stub name string
  //
  // Inputs:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - element count, treated as ssize_t, can be zero
  //
  // Side Effects:
  //   disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
  //   no-overlap entry point used by generate_conjoint_long_oop_copy().
  //
  address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
    const Register from        = rdi;  // source array address
    const Register to          = rsi;  // destination array address
    const Register qword_count = rdx;  // elements count
    const Register end_from    = from; // source array end address
    const Register end_to      = rcx;  // destination array end address
    const Register saved_to    = to;
    // End pointers are inclusive, and if count is not zero they point
    // to the last unit copied:  end_to[0] := end_from[0]

    __ enter(); // required for proper stackwalking of RuntimeStub frame
    // Save no-overlap entry point for generate_conjoint_long_oop_copy()
    assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.

    if (is_oop) {
      disjoint_oop_copy_entry = __ pc();
      // no registers are destroyed by this call
      gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
    } else {
      disjoint_long_copy_entry = __ pc();
    }
    BLOCK_COMMENT("Entry:");
    // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)

    setup_arg_regs(); // from => rdi, to => rsi, count => rdx
                      // r9 and r10 may be used to save non-volatile registers

    // 'from', 'to' and 'qword_count' are now valid

    // Copy from low to high addresses.  Use 'to' as scratch.
    __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
    __ leaq(end_to,   Address(to,   qword_count, Address::times_8, -8));
    // Negative index counts up toward zero so the loop test is a simple jnz.
    __ negq(qword_count);
    __ jmp(L_copy_32_bytes);

    // Copy trailing qwords (entered from the 32-byte bulk copier below)
  __ BIND(L_copy_8_bytes);
    __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
    __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
    __ incrementq(qword_count);
    __ jcc(Assembler::notZero, L_copy_8_bytes);

    if (is_oop) {
      // The oop flavor still needs the card-marking post barrier at L_exit.
      __ jmp(L_exit);
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
      restore_arg_regs();
      __ xorq(rax, rax); // return 0
      __ leave(); // required for proper stackwalking of RuntimeStub frame
      __ ret(0);
    }

    // Copy 32-byte chunks
    copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);

    if (is_oop) {
    __ BIND(L_exit);
      // 'end_to' still holds the inclusive end pointer needed by the barrier.
      gen_write_ref_array_post_barrier(saved_to, end_to, rax);
      inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
    } else {
      inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
    }
    restore_arg_regs();
    __ xorq(rax, rax); // return 0
    __ leave(); // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
1955 | |
1956 // Arguments: | |
1957 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes | |
1958 // ignored | |
1959 // is_oop - true => oop array, so generate store check code | |
1960 // name - stub name string | |
1961 // | |
1962 // Inputs: | |
1963 // c_rarg0 - source array address | |
1964 // c_rarg1 - destination array address | |
1965 // c_rarg2 - element count, treated as ssize_t, can be zero | |
1966 // | |
1967 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) { | |
1968 __ align(CodeEntryAlignment); | |
1969 StubCodeMark mark(this, "StubRoutines", name); | |
1970 address start = __ pc(); | |
1971 | |
1972 Label L_copy_32_bytes, L_copy_8_bytes, L_exit; | |
1973 const Register from = rdi; // source array address | |
1974 const Register to = rsi; // destination array address | |
1975 const Register qword_count = rdx; // elements count | |
1976 const Register saved_count = rcx; | |
1977 | |
1978 __ enter(); // required for proper stackwalking of RuntimeStub frame | |
1979 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. | |
1980 | |
1981 address disjoint_copy_entry = NULL; | |
1982 if (is_oop) { | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
1983 assert(!UseCompressedOops, "shouldn't be called for compressed oops"); |
0 | 1984 disjoint_copy_entry = disjoint_oop_copy_entry; |
1985 oop_copy_entry = __ pc(); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
1986 array_overlap_test(disjoint_oop_copy_entry, Address::times_8); |
0 | 1987 } else { |
1988 disjoint_copy_entry = disjoint_long_copy_entry; | |
1989 long_copy_entry = __ pc(); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
1990 array_overlap_test(disjoint_long_copy_entry, Address::times_8); |
0 | 1991 } |
1992 BLOCK_COMMENT("Entry:"); | |
1993 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) | |
1994 | |
1995 array_overlap_test(disjoint_copy_entry, Address::times_8); | |
1996 setup_arg_regs(); // from => rdi, to => rsi, count => rdx | |
1997 // r9 and r10 may be used to save non-volatile registers | |
1998 | |
1999 // 'from', 'to' and 'qword_count' are now valid | |
2000 | |
2001 if (is_oop) { | |
2002 // Save to and count for store barrier | |
2003 __ movq(saved_count, qword_count); | |
2004 // No registers are destroyed by this call | |
2005 gen_write_ref_array_pre_barrier(to, saved_count); | |
2006 } | |
2007 | |
2008 __ jmp(L_copy_32_bytes); | |
2009 | |
2010 // Copy trailing qwords | |
2011 __ BIND(L_copy_8_bytes); | |
2012 __ movq(rax, Address(from, qword_count, Address::times_8, -8)); | |
2013 __ movq(Address(to, qword_count, Address::times_8, -8), rax); | |
2014 __ decrementq(qword_count); | |
2015 __ jcc(Assembler::notZero, L_copy_8_bytes); | |
2016 | |
2017 if (is_oop) { | |
2018 __ jmp(L_exit); | |
2019 } else { | |
2020 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); | |
2021 restore_arg_regs(); | |
2022 __ xorq(rax, rax); // return 0 | |
2023 __ leave(); // required for proper stackwalking of RuntimeStub frame | |
2024 __ ret(0); | |
2025 } | |
2026 | |
2027 // Copy in 32-bytes chunks | |
2028 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes); | |
2029 | |
2030 if (is_oop) { | |
2031 __ BIND(L_exit); | |
2032 __ leaq(rcx, Address(to, saved_count, Address::times_8, -8)); | |
2033 gen_write_ref_array_post_barrier(to, rcx, rax); | |
2034 inc_counter_np(SharedRuntime::_oop_array_copy_ctr); | |
2035 } else { | |
2036 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr); | |
2037 } | |
2038 restore_arg_regs(); | |
2039 __ xorq(rax, rax); // return 0 | |
2040 __ leave(); // required for proper stackwalking of RuntimeStub frame | |
2041 __ ret(0); | |
2042 | |
2043 return start; | |
2044 } | |
2045 | |
2046 | |
2047 // Helper for generating a dynamic type check. | |
2048 // Smashes no registers. | |
  // Helper for generating a dynamic type check.
  // Smashes no registers.
  //
  // Jumps to L_success if sub_klass is a subtype of super_klass,
  // otherwise falls through to the instruction after L_miss.
  void generate_type_check(Register sub_klass,
                           Register super_check_offset,
                           Register super_klass,
                           Label& L_success) {
    assert_different_registers(sub_klass, super_check_offset, super_klass);

    BLOCK_COMMENT("type_check:");

    Label L_miss;

    // a couple of useful fields in sub_klass:
    int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
                     Klass::secondary_supers_offset_in_bytes());
    int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
                     Klass::secondary_super_cache_offset_in_bytes());
    Address secondary_supers_addr(sub_klass, ss_offset);
    Address super_cache_addr(     sub_klass, sc_offset);

    // if the pointers are equal, we are done (e.g., String[] elements)
    __ cmpq(super_klass, sub_klass);
    __ jcc(Assembler::equal, L_success);

    // check the supertype display:
    // super_check_offset is a byte offset into sub_klass selected by the caller.
    Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
    __ cmpq(super_klass, super_check_addr); // test the super type
    __ jcc(Assembler::equal, L_success);

    // if it was a primary super, we can just fail immediately
    // (only a secondary-super-cache miss requires the slow linear scan below)
    __ cmpl(super_check_offset, sc_offset);
    __ jcc(Assembler::notEqual, L_miss);

    // Now do a linear scan of the secondary super-klass chain.
    // The repne_scan instruction uses fixed registers, which we must spill.
    // (We need a couple more temps in any case.)
    // This code is rarely used, so simplicity is a virtue here.
    inc_counter_np(SharedRuntime::_partial_subtype_ctr);
    {
      // Spill the fixed registers repne_scan needs: rax (value),
      // rcx (count), rdi (pointer).
      __ pushq(rax);
      __ pushq(rcx);
      __ pushq(rdi);
      assert_different_registers(sub_klass, super_klass, rax, rcx, rdi);

      __ movq(rdi, secondary_supers_addr);
      // Load the array length.
      __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
      // Skip to start of data.
      __ addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
      // Scan rcx words at [rdi] for occurance of rax
      // Set NZ/Z based on last compare
      __ movq(rax, super_klass);
      if (UseCompressedOops) {
        // Compare against compressed form.  Don't need to uncompress because
        // looks like orig rax is restored in popq below.
        __ encode_heap_oop(rax);
        __ repne_scanl();
      } else {
        __ repne_scanq();
      }

      // Unspill the temp. registers:
      __ popq(rdi);
      __ popq(rcx);
      __ popq(rax);

      // ZF clear => super_klass was not found in the secondary-supers array.
      __ jcc(Assembler::notEqual, L_miss);
    }

    // Success.  Cache the super we found and proceed in triumph.
    __ movq(super_cache_addr, super_klass); // note: rax is dead
    __ jmp(L_success);

    // Fall through on failure!
  __ BIND(L_miss);
  }
2123 | |
2124 // | |
2125 // Generate checkcasting array copy stub | |
2126 // | |
2127 // Input: | |
2128 // c_rarg0 - source array address | |
2129 // c_rarg1 - destination array address | |
2130 // c_rarg2 - element count, treated as ssize_t, can be zero | |
2131 // c_rarg3 - size_t ckoff (super_check_offset) | |
2132 // not Win64 | |
2133 // c_rarg4 - oop ckval (super_klass) | |
2134 // Win64 | |
2135 // rsp+40 - oop ckval (super_klass) | |
2136 // | |
2137 // Output: | |
2138 // rax == 0 - success | |
2139 // rax == -1^K - failure, where K is partial transfer count | |
2140 // | |
2141 address generate_checkcast_copy(const char *name) { | |
2142 | |
2143 Label L_load_element, L_store_element, L_do_card_marks, L_done; | |
2144 | |
2145 // Input registers (after setup_arg_regs) | |
2146 const Register from = rdi; // source array address | |
2147 const Register to = rsi; // destination array address | |
2148 const Register length = rdx; // elements count | |
2149 const Register ckoff = rcx; // super_check_offset | |
2150 const Register ckval = r8; // super_klass | |
2151 | |
2152 // Registers used as temps (r13, r14 are save-on-entry) | |
2153 const Register end_from = from; // source array end address | |
2154 const Register end_to = r13; // destination array end address | |
2155 const Register count = rdx; // -(count_remaining) | |
2156 const Register r14_length = r14; // saved copy of length | |
2157 // End pointers are inclusive, and if length is not zero they point | |
2158 // to the last unit copied: end_to[0] := end_from[0] | |
2159 | |
2160 const Register rax_oop = rax; // actual oop copied | |
2161 const Register r11_klass = r11; // oop._klass | |
2162 | |
2163 //--------------------------------------------------------------- | |
2164 // Assembler stub will be used for this call to arraycopy | |
2165 // if the two arrays are subtypes of Object[] but the | |
2166 // destination array type is not equal to or a supertype | |
2167 // of the source type. Each element must be separately | |
2168 // checked. | |
2169 | |
2170 __ align(CodeEntryAlignment); | |
2171 StubCodeMark mark(this, "StubRoutines", name); | |
2172 address start = __ pc(); | |
2173 | |
2174 __ enter(); // required for proper stackwalking of RuntimeStub frame | |
2175 | |
2176 checkcast_copy_entry = __ pc(); | |
2177 BLOCK_COMMENT("Entry:"); | |
2178 | |
2179 #ifdef ASSERT | |
2180 // caller guarantees that the arrays really are different | |
2181 // otherwise, we would have to make conjoint checks | |
2182 { Label L; | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
2183 array_overlap_test(L, TIMES_OOP); |
0 | 2184 __ stop("checkcast_copy within a single array"); |
2185 __ bind(L); | |
2186 } | |
2187 #endif //ASSERT | |
2188 | |
2189 // allocate spill slots for r13, r14 | |
2190 enum { | |
2191 saved_r13_offset, | |
2192 saved_r14_offset, | |
2193 saved_rbp_offset, | |
2194 saved_rip_offset, | |
2195 saved_rarg0_offset | |
2196 }; | |
2197 __ subq(rsp, saved_rbp_offset * wordSize); | |
2198 __ movq(Address(rsp, saved_r13_offset * wordSize), r13); | |
2199 __ movq(Address(rsp, saved_r14_offset * wordSize), r14); | |
2200 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx | |
2201 // ckoff => rcx, ckval => r8 | |
2202 // r9 and r10 may be used to save non-volatile registers | |
2203 #ifdef _WIN64 | |
2204 // last argument (#4) is on stack on Win64 | |
2205 const int ckval_offset = saved_rarg0_offset + 4; | |
2206 __ movq(ckval, Address(rsp, ckval_offset * wordSize)); | |
2207 #endif | |
2208 | |
2209 // check that int operands are properly extended to size_t | |
2210 assert_clean_int(length, rax); | |
2211 assert_clean_int(ckoff, rax); | |
2212 | |
2213 #ifdef ASSERT | |
2214 BLOCK_COMMENT("assert consistent ckoff/ckval"); | |
2215 // The ckoff and ckval must be mutually consistent, | |
2216 // even though caller generates both. | |
2217 { Label L; | |
2218 int sco_offset = (klassOopDesc::header_size() * HeapWordSize + | |
2219 Klass::super_check_offset_offset_in_bytes()); | |
2220 __ cmpl(ckoff, Address(ckval, sco_offset)); | |
2221 __ jcc(Assembler::equal, L); | |
2222 __ stop("super_check_offset inconsistent"); | |
2223 __ bind(L); | |
2224 } | |
2225 #endif //ASSERT | |
2226 | |
2227 // Loop-invariant addresses. They are exclusive end pointers. | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
2228 Address end_from_addr(from, length, TIMES_OOP, 0); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
2229 Address end_to_addr(to, length, TIMES_OOP, 0); |
0 | 2230 // Loop-variant addresses. They assume post-incremented count < 0. |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
2231 Address from_element_addr(end_from, count, TIMES_OOP, 0); |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
2232 Address to_element_addr(end_to, count, TIMES_OOP, 0); |
0 | 2233 |
2234 gen_write_ref_array_pre_barrier(to, count); | |
2235 | |
2236 // Copy from low to high addresses, indexed from the end of each array. | |
2237 __ leaq(end_from, end_from_addr); | |
2238 __ leaq(end_to, end_to_addr); | |
2239 __ movq(r14_length, length); // save a copy of the length | |
2240 assert(length == count, ""); // else fix next line: | |
2241 __ negq(count); // negate and test the length | |
2242 __ jcc(Assembler::notZero, L_load_element); | |
2243 | |
2244 // Empty array: Nothing to do. | |
2245 __ xorq(rax, rax); // return 0 on (trivial) success | |
2246 __ jmp(L_done); | |
2247 | |
2248 // ======== begin loop ======== | |
2249 // (Loop is rotated; its entry is L_load_element.) | |
2250 // Loop control: | |
2251 // for (count = -count; count != 0; count++) | |
2252 // Base pointers src, dst are biased by 8*(count-1),to last element. | |
2253 __ align(16); | |
2254 | |
2255 __ BIND(L_store_element); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
2256 __ store_heap_oop(to_element_addr, rax_oop); // store the oop |
0 | 2257 __ incrementq(count); // increment the count toward zero |
2258 __ jcc(Assembler::zero, L_do_card_marks); | |
2259 | |
2260 // ======== loop entry is here ======== | |
2261 __ BIND(L_load_element); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
2262 __ load_heap_oop(rax_oop, from_element_addr); // load the oop |
0 | 2263 __ testq(rax_oop, rax_oop); |
2264 __ jcc(Assembler::zero, L_store_element); | |
2265 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
16
diff
changeset
|
2266 __ load_klass(r11_klass, rax_oop);// query the object klass |
0 | 2267 generate_type_check(r11_klass, ckoff, ckval, L_store_element); |
2268 // ======== end loop ======== | |
2269 | |
2270 // It was a real error; we must depend on the caller to finish the job. | |
2271 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops. | |
2272 // Emit GC store barriers for the oops we have copied (r14 + rdx), | |
2273 // and report their number to the caller. | |
2274 assert_different_registers(rax, r14_length, count, to, end_to, rcx); | |
2275 __ leaq(end_to, to_element_addr); | |
2276 gen_write_ref_array_post_barrier(to, end_to, rcx); | |
2277 __ movq(rax, r14_length); // original oops | |
2278 __ addq(rax, count); // K = (original - remaining) oops | |
2279 __ notq(rax); // report (-1^K) to caller | |
2280 __ jmp(L_done); | |
2281 | |
2282 // Come here on success only. | |
2283 __ BIND(L_do_card_marks); | |
2284 __ addq(end_to, -wordSize); // make an inclusive end pointer | |
2285 gen_write_ref_array_post_barrier(to, end_to, rcx); | |
2286 __ xorq(rax, rax); // return 0 on success | |
2287 | |
2288 // Common exit point (success or failure). | |
2289 __ BIND(L_done); | |
2290 __ movq(r13, Address(rsp, saved_r13_offset * wordSize)); | |
2291 __ movq(r14, Address(rsp, saved_r14_offset * wordSize)); | |
2292 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr); | |
2293 restore_arg_regs(); | |
2294 __ leave(); // required for proper stackwalking of RuntimeStub frame | |
2295 __ ret(0); | |
2296 | |
2297 return start; | |
2298 } | |
2299 | |
  //
  // Generate 'unsafe' array copy stub
  // Though just as safe as the other stubs, it takes an unscaled
  // size_t argument instead of an element count.
  //
  // Input:
  //   c_rarg0   - source array address
  //   c_rarg1   - destination array address
  //   c_rarg2   - byte count, treated as ssize_t, can be zero
  //
  // Examines the alignment of the operands and dispatches
  // to a long, int, short, or byte copy loop.
  //
  address generate_unsafe_copy(const char *name) {

    Label L_long_aligned, L_int_aligned, L_short_aligned;

    // Input registers (before setup_arg_regs)
    const Register from        = c_rarg0;  // source array address
    const Register to          = c_rarg1;  // destination array address
    const Register size        = c_rarg2;  // byte count (size_t)

    // Register used as a temp
    const Register bits        = rax;      // test copy of low bits

    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);

    // The low bits of (from | to | size) give the strongest alignment
    // guaranteed by all three operands at once: a bit is clear in the OR
    // only if it is clear in every operand.
    __ movq(bits, from);
    __ orq(bits, to);
    __ orq(bits, size);

    // Dispatch on the widest element size whose alignment is satisfied.
    __ testb(bits, BytesPerLong-1);
    __ jccb(Assembler::zero, L_long_aligned);

    __ testb(bits, BytesPerInt-1);
    __ jccb(Assembler::zero, L_int_aligned);

    __ testb(bits, BytesPerShort-1);
    __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));

    __ BIND(L_short_aligned);
    __ shrq(size, LogBytesPerShort); // size => short_count
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_int_aligned);
    __ shrq(size, LogBytesPerInt); // size => int_count
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_long_aligned);
    __ shrq(size, LogBytesPerLong); // size => qword_count
    __ jump(RuntimeAddress(long_copy_entry));

    return start;
  }
2361 | |
  // Perform range checks on the proposed arraycopy.
  // Kills temp, but nothing else.
  // Also, clean the sign bits of src_pos and dst_pos.
  void arraycopy_range_checks(Register src,     // source array oop (c_rarg0)
                              Register src_pos, // source position (c_rarg1)
                              Register dst,     // destination array oop (c_rarg2)
                              Register dst_pos, // destination position (c_rarg3)
                              Register length,  // element count (32-bit, non-negative)
                              Register temp,    // scratch, clobbered
                              Label& L_failed) {
    BLOCK_COMMENT("arraycopy_range_checks:");

    //  if (src_pos + length > arrayOop(src)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, src_pos);             // src_pos + length
    // Unsigned compare ('above'); callers have already rejected negative
    // src_pos/length, so the 32-bit sum cannot wrap.
    __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    //  if (dst_pos + length > arrayOop(dst)->length())  FAIL;
    __ movl(temp, length);
    __ addl(temp, dst_pos);             // dst_pos + length
    __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
    __ jcc(Assembler::above, L_failed);

    // Have to clean up high 32-bits of 'src_pos' and 'dst_pos'.
    // Move with sign extension can be used since they are positive.
    __ movslq(src_pos, src_pos);
    __ movslq(dst_pos, dst_pos);

    BLOCK_COMMENT("arraycopy_range_checks done");
  }
2393 | |
  //
  //  Generate generic array copy stubs
  //
  //  Input:
  //    c_rarg0    -  src oop
  //    c_rarg1    -  src_pos (32-bits)
  //    c_rarg2    -  dst oop
  //    c_rarg3    -  dst_pos (32-bits)
  // not Win64
  //    c_rarg4    -  element count (32-bits)
  // Win64
  //    rsp+40     -  element count (32-bits)
  //
  //  Output:
  //    rax ==  0  -  success
  //    rax == -1^K - failure, where K is partial transfer count
  //
  address generate_generic_copy(const char *name) {

    Label L_failed, L_failed_0, L_objArray;
    Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;

    // Input registers
    const Register src      = c_rarg0;  // source array oop
    const Register src_pos  = c_rarg1;  // source position
    const Register dst      = c_rarg2;  // destination array oop
    const Register dst_pos  = c_rarg3;  // destination position
    // elements count is on stack on Win64
#ifdef _WIN64
#define C_RARG4 Address(rsp, 6 * wordSize)
#else
#define C_RARG4 c_rarg4
#endif

    // Pad so that the 5-byte jmp(L_failed) emitted just below ends exactly
    // at a CodeEntryAlignment boundary; the stub entry then starts aligned
    // with the short-hop failure target immediately preceding it.
    { int modulus = CodeEntryAlignment;
      int target  = modulus - 5; // 5 = sizeof jmp(L_failed)
      int advance = target - (__ offset() % modulus);
      if (advance < 0)  advance += modulus;
      if (advance > 0)  __ nop(advance);
    }
    StubCodeMark mark(this, "StubRoutines", name);

    // Short-hop target to L_failed.  Makes for denser prologue code.
    __ BIND(L_failed_0);
    __ jmp(L_failed);
    assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");

    __ align(CodeEntryAlignment);
    address start = __ pc();

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    // bump this on entry, not on exit:
    inc_counter_np(SharedRuntime::_generic_array_copy_ctr);

    //-----------------------------------------------------------------------
    // Assembler stub will be used for this call to arraycopy
    // if the following conditions are met:
    //
    // (1) src and dst must not be null.
    // (2) src_pos must not be negative.
    // (3) dst_pos must not be negative.
    // (4) length  must not be negative.
    // (5) src klass and dst klass should be the same and not NULL.
    // (6) src and dst should be arrays.
    // (7) src_pos + length must not exceed length of src.
    // (8) dst_pos + length must not exceed length of dst.
    //

    //  if (src == NULL) return -1;
    __ testq(src, src);         // src oop
    size_t j1off = __ offset();
    __ jccb(Assembler::zero, L_failed_0);

    //  if (src_pos < 0) return -1;
    __ testl(src_pos, src_pos); // src_pos (32-bits)
    __ jccb(Assembler::negative, L_failed_0);

    //  if (dst == NULL) return -1;
    __ testq(dst, dst);         // dst oop
    __ jccb(Assembler::zero, L_failed_0);

    //  if (dst_pos < 0) return -1;
    __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
    size_t j4off = __ offset();
    __ jccb(Assembler::negative, L_failed_0);

    // The first four tests are very dense code,
    // but not quite dense enough to put four
    // jumps in a 16-byte instruction fetch buffer.
    // That's good, because some branch predicters
    // do not like jumps so close together.
    // Make sure of this.
    guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");

    // registers used as temp
    const Register r11_length    = r11; // elements count to copy
    const Register r10_src_klass = r10; // array klass
    const Register r9_dst_klass  = r9;  // dest array klass

    //  if (length < 0) return -1;
    __ movl(r11_length, C_RARG4);   // length (elements count, 32-bits value)
    __ testl(r11_length, r11_length);
    __ jccb(Assembler::negative, L_failed_0);

    __ load_klass(r10_src_klass, src);
#ifdef ASSERT
    //  assert(src->klass() != NULL);
    BLOCK_COMMENT("assert klasses not null");
    { Label L1, L2;
      __ testq(r10_src_klass, r10_src_klass);
      __ jcc(Assembler::notZero, L2);   // it is broken if klass is NULL
      __ bind(L1);
      __ stop("broken null klass");
      __ bind(L2);
      __ load_klass(r9_dst_klass, dst);
      __ cmpq(r9_dst_klass, 0);
      __ jcc(Assembler::equal, L1);     // this would be broken also
      BLOCK_COMMENT("assert done");
    }
#endif

    // Load layout helper (32-bits)
    //
    //  |array_tag|     | header_size | element_type |     |log2_element_size|
    // 32        30    24            16              8     2                 0
    //
    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
    //

    int lh_offset = klassOopDesc::header_size() * HeapWordSize +
                    Klass::layout_helper_offset_in_bytes();

    const Register rax_lh = rax;  // layout helper

    __ movl(rax_lh, Address(r10_src_klass, lh_offset));

    // Handle objArrays completely differently...
    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
    __ cmpl(rax_lh, objArray_lh);
    __ jcc(Assembler::equal, L_objArray);

    //  if (src->klass() != dst->klass()) return -1;
    __ load_klass(r9_dst_klass, dst);
    __ cmpq(r10_src_klass, r9_dst_klass);
    __ jcc(Assembler::notEqual, L_failed);

    //  if (!src->is_Array()) return -1;
    // Non-arrays have a non-negative (neutral-or-above) layout helper.
    __ cmpl(rax_lh, Klass::_lh_neutral_value);
    __ jcc(Assembler::greaterEqual, L_failed);

    // At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
    { Label L;
      __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
      __ jcc(Assembler::greaterEqual, L);
      __ stop("must be a primitive array");
      __ bind(L);
    }
#endif

    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    // typeArrayKlass
    //
    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
    //

    const Register r10_offset = r10;    // array offset
    const Register rax_elsize = rax_lh; // element size

    // Extract the header size from the layout helper and advance both
    // base oops past their headers to the first element.
    __ movl(r10_offset, rax_lh);
    __ shrl(r10_offset, Klass::_lh_header_size_shift);
    __ andq(r10_offset, Klass::_lh_header_size_mask);   // array_offset
    __ addq(src, r10_offset);           // src array offset
    __ addq(dst, r10_offset);           // dst array offset
    BLOCK_COMMENT("choose copy loop based on element size");
    __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize

    // next registers should be set before the jump to corresponding stub
    const Register from     = c_rarg0;  // source array address
    const Register to       = c_rarg1;  // destination array address
    const Register count    = c_rarg2;  // elements count

    // 'from', 'to', 'count' registers should be set in such order
    // since they are the same as 'src', 'src_pos', 'dst'.

    __ BIND(L_copy_bytes);
    __ cmpl(rax_elsize, 0);
    __ jccb(Assembler::notEqual, L_copy_shorts);
    __ leaq(from, Address(src, src_pos, Address::times_1, 0));// src_addr
    __ leaq(to,   Address(dst, dst_pos, Address::times_1, 0));// dst_addr
    __ movslq(count, r11_length); // length
    __ jump(RuntimeAddress(byte_copy_entry));

    __ BIND(L_copy_shorts);
    __ cmpl(rax_elsize, LogBytesPerShort);
    __ jccb(Assembler::notEqual, L_copy_ints);
    __ leaq(from, Address(src, src_pos, Address::times_2, 0));// src_addr
    __ leaq(to,   Address(dst, dst_pos, Address::times_2, 0));// dst_addr
    __ movslq(count, r11_length); // length
    __ jump(RuntimeAddress(short_copy_entry));

    __ BIND(L_copy_ints);
    __ cmpl(rax_elsize, LogBytesPerInt);
    __ jccb(Assembler::notEqual, L_copy_longs);
    __ leaq(from, Address(src, src_pos, Address::times_4, 0));// src_addr
    __ leaq(to,   Address(dst, dst_pos, Address::times_4, 0));// dst_addr
    __ movslq(count, r11_length); // length
    __ jump(RuntimeAddress(int_copy_entry));

    __ BIND(L_copy_longs);
#ifdef ASSERT
    { Label L;
      __ cmpl(rax_elsize, LogBytesPerLong);
      __ jcc(Assembler::equal, L);
      __ stop("must be long copy, but elsize is wrong");
      __ bind(L);
    }
#endif
    __ leaq(from, Address(src, src_pos, Address::times_8, 0));// src_addr
    __ leaq(to,   Address(dst, dst_pos, Address::times_8, 0));// dst_addr
    __ movslq(count, r11_length); // length
    __ jump(RuntimeAddress(long_copy_entry));

    // objArrayKlass
    __ BIND(L_objArray);
    // live at this point:  r10_src_klass, src[_pos], dst[_pos]

    Label L_plain_copy, L_checkcast_copy;
    //  test array classes for subtyping
    __ load_klass(r9_dst_klass, dst);
    __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality
    __ jcc(Assembler::notEqual, L_checkcast_copy);

    // Identically typed arrays can be copied without element-wise checks.
    arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                           r10, L_failed);

    __ leaq(from, Address(src, src_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
    __ leaq(to,   Address(dst, dst_pos, TIMES_OOP,
                 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
    __ movslq(count, r11_length); // length
    __ BIND(L_plain_copy);
    __ jump(RuntimeAddress(oop_copy_entry));

    __ BIND(L_checkcast_copy);
    // live at this point:  r10_src_klass, !r11_length
    {
      // assert(r11_length == C_RARG4); // will reload from here
      Register r11_dst_klass = r11;
      __ load_klass(r11_dst_klass, dst);

      // Before looking at dst.length, make sure dst is also an objArray.
      __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
      __ jcc(Assembler::notEqual, L_failed);

      // It is safe to examine both src.length and dst.length.
#ifndef _WIN64
      arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,
                             rax, L_failed);
#else
      // On Win64 C_RARG4 is a stack slot, so the length must be reloaded
      // into a register for the range check; rax (used as temp there)
      // does not clobber r11, but r11 itself is reused, so reload klass too.
      __ movl(r11_length, C_RARG4);     // reload
      arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
                             rax, L_failed);
      __ load_klass(r11_dst_klass, dst); // reload
#endif

      // Marshal the base address arguments now, freeing registers.
      __ leaq(from, Address(src, src_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ leaq(to,   Address(dst, dst_pos, TIMES_OOP,
                   arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
      __ movl(count, C_RARG4);          // length (reloaded)
      Register sco_temp = c_rarg3;      // this register is free now
      assert_different_registers(from, to, count, sco_temp,
                                 r11_dst_klass, r10_src_klass);
      assert_clean_int(count, sco_temp);

      // Generate the type check.
      int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
                        Klass::super_check_offset_offset_in_bytes());
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);
      generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);

      // Fetch destination element klass from the objArrayKlass header.
      int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
                       objArrayKlass::element_klass_offset_in_bytes());
      __ movq(r11_dst_klass, Address(r11_dst_klass, ek_offset));
      __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
      assert_clean_int(sco_temp, rax);

      // the checkcast_copy loop needs two extra arguments:
      assert(c_rarg3 == sco_temp, "#3 already in place");
      __ movq(C_RARG4, r11_dst_klass);  // dst.klass.element_klass
      __ jump(RuntimeAddress(checkcast_copy_entry));
    }

    __ BIND(L_failed);
    __ xorq(rax, rax);
    __ notq(rax); // return -1
    __ leave();   // required for proper stackwalking of RuntimeStub frame
    __ ret(0);

    return start;
  }
2704 | |
2705 #undef length_arg | |
2706 | |
  // Generate all of the arraycopy stubs and publish their entry points
  // in StubRoutines.  Generation order is load-bearing: see below.
  void generate_arraycopy_stubs() {
    // Call the conjoint generation methods immediately after
    // the disjoint ones so that short branches from the former
    // to the latter can be generated.
    StubRoutines::_jbyte_disjoint_arraycopy  = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
    StubRoutines::_jbyte_arraycopy           = generate_conjoint_byte_copy(false, "jbyte_arraycopy");

    StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
    StubRoutines::_jshort_arraycopy          = generate_conjoint_short_copy(false, "jshort_arraycopy");

    StubRoutines::_jint_disjoint_arraycopy   = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy");
    StubRoutines::_jint_arraycopy            = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy");

    StubRoutines::_jlong_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
    StubRoutines::_jlong_arraycopy           = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");


    // With compressed oops an element is 32 bits, so the oop copiers
    // reuse the int-sized loops; otherwise they reuse the long-sized ones.
    if (UseCompressedOops) {
      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy           = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy");
    } else {
      StubRoutines::_oop_disjoint_arraycopy  = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
      StubRoutines::_oop_arraycopy           = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
    }

    StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
    StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
    StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");

    // We don't generate specialized code for HeapWord-aligned source
    // arrays, so just use the code we've already generated
    StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = StubRoutines::_jbyte_disjoint_arraycopy;
    StubRoutines::_arrayof_jbyte_arraycopy           = StubRoutines::_jbyte_arraycopy;

    StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
    StubRoutines::_arrayof_jshort_arraycopy          = StubRoutines::_jshort_arraycopy;

    StubRoutines::_arrayof_jint_disjoint_arraycopy   = StubRoutines::_jint_disjoint_arraycopy;
    StubRoutines::_arrayof_jint_arraycopy            = StubRoutines::_jint_arraycopy;

    StubRoutines::_arrayof_jlong_disjoint_arraycopy  = StubRoutines::_jlong_disjoint_arraycopy;
    StubRoutines::_arrayof_jlong_arraycopy           = StubRoutines::_jlong_arraycopy;

    StubRoutines::_arrayof_oop_disjoint_arraycopy    = StubRoutines::_oop_disjoint_arraycopy;
    StubRoutines::_arrayof_oop_arraycopy             = StubRoutines::_oop_arraycopy;
  }
2753 | |
2754 #undef __ | |
2755 #define __ masm-> | |
2756 | |
  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Since we need to preserve callee-saved values (currently
  // only for C2, but done for C1 as well) we need a callee-saved oop
  // map and therefore have to make these stubs into RuntimeStubs
  // rather than BufferBlobs.  If the compiler needs all registers to
  // be preserved between the fault point and the exception handler
  // then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception.  All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  //
  //   name                       - stub name, also used as the CodeBuffer name
  //   runtime_entry              - VM runtime function to call (takes the thread)
  //   restore_saved_exception_pc - if true, push the PC saved in the thread
  //                                as this stub's return address first
  // Returns the entry point of the newly created RuntimeStub.
  address generate_throw_exception(const char* name,
                                   address runtime_entry,
                                   bool restore_saved_exception_pc) {
    // Information about frame layout at time of blocking runtime call.
    // Note that we only have to preserve callee-saved registers since
    // the compilers are responsible for supplying a continuation point
    // if they expect all registers to be preserved.
    enum layout {
      rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
      rbp_off2,
      return_off,
      return_off2,
      framesize // inclusive of return address
    };

    int insts_size = 512;
    int locs_size  = 64;

    CodeBuffer code(name, insts_size, locs_size);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(&code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM
    if (restore_saved_exception_pc) {
      __ movq(rax,
              Address(r15_thread,
                      in_bytes(JavaThread::saved_exception_pc_offset())));
      __ pushq(rax);
    }

    __ enter(); // required for proper stackwalking of RuntimeStub frame

    assert(is_even(framesize/2), "sp not 16-byte aligned");

    // return address and rbp are already in place
    __ subq(rsp, (framesize-4) << LogBytesPerInt); // prolog

    int frame_complete = __ pc() - start;

    // Set up last_Java_sp and last_Java_fp
    __ set_last_Java_frame(rsp, rbp, NULL);

    // Call runtime
    __ movq(c_rarg0, r15_thread);
    BLOCK_COMMENT("call runtime_entry");
    __ call(RuntimeAddress(runtime_entry));

    // Generate oop map
    OopMap* map = new OopMap(framesize, 0);

    oop_maps->add_gc_map(__ pc() - start, map);

    __ reset_last_Java_frame(true, false);

    __ leave(); // required for proper stackwalking of RuntimeStub frame

    // check for pending exceptions
#ifdef ASSERT
    // The runtime call is expected to have set a pending exception;
    // a NULL here means the throw stub was reached without one.
    Label L;
    __ cmpq(Address(r15_thread, Thread::pending_exception_offset()),
            (int) NULL);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));


    // codeBlob framesize is in words (not VMRegImpl::slot_size)
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name,
                                    &code,
                                    frame_complete,
                                    (framesize >> (LogBytesPerWord - LogBytesPerInt)),
                                    oop_maps, false);
    return stub->entry_point();
  }
2854 | |
  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // This platform-specific stub is needed by generate_call_stub()
    // 0x1F80: all SSE exception mask bits set, round-to-nearest,
    // no flush-to-zero -- the VM's standard MXCSR value.
    StubRoutines::amd64::_mxcsr_std = generate_fp_mask("mxcsr_std",        0x0000000000001F80);

    // entry points that exist in all platforms Note: This is code
    // that could be shared among different platforms - however the
    // benefit seems to be smaller than the disadvantage of having a
    // much more complicated generator structure. See also comment in
    // stubRoutines.hpp.

    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry =
      generate_call_stub(StubRoutines::_call_stub_return_address);

    // is referenced by megamorphic call
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // atomic calls
    StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
    StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
    StubRoutines::_fence_entry               = generate_orderaccess_fence();

    StubRoutines::_handler_for_unsafe_access_entry =
      generate_handler_for_unsafe_access();

    // platform dependent
    StubRoutines::amd64::_get_previous_fp_entry = generate_get_previous_fp();

    StubRoutines::amd64::_verify_mxcsr_entry    = generate_verify_mxcsr();
  }
2893 | |
  void generate_all() {
    // Generates the remaining stubs and initializes their entry
    // points (the startup-critical subset was already produced by
    // generate_initial()).

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds and need to be relocatable, so they each
    // fabricate a RuntimeStub internally.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_AbstractMethodError),
                               false);

    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_IncompatibleClassChangeError),
                               false);

    StubRoutines::_throw_ArithmeticException_entry =
      generate_throw_exception("ArithmeticException throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_ArithmeticException),
                               true);

    StubRoutines::_throw_NullPointerException_entry =
      generate_throw_exception("NullPointerException throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException),
                               true);

    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_NullPointerException_at_call),
                               false);

    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address,
                                                SharedRuntime::
                                                throw_StackOverflowError),
                               false);

    // entry points that are platform specific
    // fixup routines used by f2i/f2l/d2i/d2l conversions on NaN and
    // out-of-range inputs
    StubRoutines::amd64::_f2i_fixup = generate_f2i_fixup();
    StubRoutines::amd64::_f2l_fixup = generate_f2l_fixup();
    StubRoutines::amd64::_d2i_fixup = generate_d2i_fixup();
    StubRoutines::amd64::_d2l_fixup = generate_d2l_fixup();

    // 64-bit masks used for SSE abs/neg of packed float/double values
    StubRoutines::amd64::_float_sign_mask  = generate_fp_mask("float_sign_mask",  0x7FFFFFFF7FFFFFFF);
    StubRoutines::amd64::_float_sign_flip  = generate_fp_mask("float_sign_flip",  0x8000000080000000);
    StubRoutines::amd64::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
    StubRoutines::amd64::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();
  }
2959 | |
2960 public: | |
2961 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) { | |
2962 if (all) { | |
2963 generate_all(); | |
2964 } else { | |
2965 generate_initial(); | |
2966 } | |
2967 } | |
2968 }; // end class declaration | |
2969 | |
// Definitions of the StubGenerator static members that cache
// arraycopy stub entry points; each is filled in as the corresponding
// stub is emitted so later stubs can tail-call into earlier ones.
address StubGenerator::disjoint_byte_copy_entry = NULL;
address StubGenerator::disjoint_short_copy_entry = NULL;
address StubGenerator::disjoint_int_copy_entry = NULL;
address StubGenerator::disjoint_long_copy_entry = NULL;
address StubGenerator::disjoint_oop_copy_entry = NULL;

address StubGenerator::byte_copy_entry = NULL;
address StubGenerator::short_copy_entry = NULL;
address StubGenerator::int_copy_entry = NULL;
address StubGenerator::long_copy_entry = NULL;
address StubGenerator::oop_copy_entry = NULL;

address StubGenerator::checkcast_copy_entry = NULL;
2983 | |
2984 void StubGenerator_generate(CodeBuffer* code, bool all) { | |
2985 StubGenerator g(code, all); | |
2986 } |