comparison src/cpu/x86/vm/stubGenerator_x86_64.cpp @ 0:a61af66fc99e jdk7-b24

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents
children f8236e79048a
1 /*
2 * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 #include "incls/_precompiled.incl"
26 #include "incls/_stubGenerator_x86_64.cpp.incl"
27
28 // Declaration and definition of StubGenerator (no .hpp file).
29 // For a more detailed description of the stub routine structure
30 // see the comment in stubRoutines.hpp
31
32 #define __ _masm->
33
34 #ifdef PRODUCT
35 #define BLOCK_COMMENT(str) /* nothing */
36 #else
37 #define BLOCK_COMMENT(str) __ block_comment(str)
38 #endif
39
40 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
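// (BIND both binds the label and, in non-product builds, emits the
// label's name -- stringized via #label -- as a block comment, which
// makes the generated code easier to follow in a disassembly.)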
41 const int MXCSR_MASK = 0xFFC0; // Mask out any pending exceptions
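// (Bits 0..5 of MXCSR are the sticky exception-status flags; 0xFFC0
// keeps bits 6..15 -- DAZ, the six exception-mask bits, rounding
// control and FTZ -- so only the control state is compared/restored.)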
42
43 // Stub Code definitions
44
45 static address handle_unsafe_access() {
46 JavaThread* thread = JavaThread::current();
47 address pc = thread->saved_exception_pc();
48 // pc is the instruction which we must emulate;
49 // doing a no-op is fine: return garbage from the load
50 // therefore, compute npc, the pc of the next instruction
51 address npc = Assembler::locate_next_instruction(pc);
52
53 // request an async exception
54 thread->set_pending_unsafe_access_error();
55
56 // return address of next instruction to execute
57 return npc;
58 }
59
60 class StubGenerator: public StubCodeGenerator {
61 private:
62
63 #ifdef PRODUCT
64 #define inc_counter_np(counter) (0)
65 #else
66 void inc_counter_np_(int& counter) {
67 __ incrementl(ExternalAddress((address)&counter));
68 }
69 #define inc_counter_np(counter) \
70 BLOCK_COMMENT("inc_counter " #counter); \
71 inc_counter_np_(counter);
72 #endif
73
74 // Call stubs are used to call Java from C
75 //
76 // Linux Arguments:
77 // c_rarg0: call wrapper address address
78 // c_rarg1: result address
79 // c_rarg2: result type BasicType
80 // c_rarg3: method methodOop
81 // c_rarg4: (interpreter) entry point address
82 // c_rarg5: parameters intptr_t*
83 // 16(rbp): parameter size (in words) int
84 // 24(rbp): thread Thread*
85 //
86 // [ return_from_Java ] <--- rsp
87 // [ argument word n ]
88 // ...
89 // -12 [ argument word 1 ]
90 // -11 [ saved r15 ] <--- rsp_after_call
91 // -10 [ saved r14 ]
92 // -9 [ saved r13 ]
93 // -8 [ saved r12 ]
94 // -7 [ saved rbx ]
95 // -6 [ call wrapper ]
96 // -5 [ result ]
97 // -4 [ result type ]
98 // -3 [ method ]
99 // -2 [ entry point ]
100 // -1 [ parameters ]
101 // 0 [ saved rbp ] <--- rbp
102 // 1 [ return address ]
103 // 2 [ parameter size ]
104 // 3 [ thread ]
105 //
106 // Windows Arguments:
107 // c_rarg0: call wrapper address address
108 // c_rarg1: result address
109 // c_rarg2: result type BasicType
110 // c_rarg3: method methodOop
111 // 48(rbp): (interpreter) entry point address
112 // 56(rbp): parameters intptr_t*
113 // 64(rbp): parameter size (in words) int
114 // 72(rbp): thread Thread*
115 //
116 // [ return_from_Java ] <--- rsp
117 // [ argument word n ]
118 // ...
119 // -8 [ argument word 1 ]
120 // -7 [ saved r15 ] <--- rsp_after_call
121 // -6 [ saved r14 ]
122 // -5 [ saved r13 ]
123 // -4 [ saved r12 ]
124 // -3 [ saved rdi ]
125 // -2 [ saved rsi ]
126 // -1 [ saved rbx ]
127 // 0 [ saved rbp ] <--- rbp
128 // 1 [ return address ]
129 // 2 [ call wrapper ]
130 // 3 [ result ]
131 // 4 [ result type ]
132 // 5 [ method ]
133 // 6 [ entry point ]
134 // 7 [ parameters ]
135 // 8 [ parameter size ]
136 // 9 [ thread ]
137 //
138 // Windows reserves the caller's stack space for arguments 1-4.
139 // We spill c_rarg0-c_rarg3 to this space.
140
141 // Call stub stack layout word offsets from rbp
142 enum call_stub_layout {
143 #ifdef _WIN64
144 rsp_after_call_off = -7,
145 r15_off = rsp_after_call_off,
146 r14_off = -6,
147 r13_off = -5,
148 r12_off = -4,
149 rdi_off = -3,
150 rsi_off = -2,
151 rbx_off = -1,
152 rbp_off = 0,
153 retaddr_off = 1,
154 call_wrapper_off = 2,
155 result_off = 3,
156 result_type_off = 4,
157 method_off = 5,
158 entry_point_off = 6,
159 parameters_off = 7,
160 parameter_size_off = 8,
161 thread_off = 9
162 #else
163 rsp_after_call_off = -12,
164 mxcsr_off = rsp_after_call_off,
165 r15_off = -11,
166 r14_off = -10,
167 r13_off = -9,
168 r12_off = -8,
169 rbx_off = -7,
170 call_wrapper_off = -6,
171 result_off = -5,
172 result_type_off = -4,
173 method_off = -3,
174 entry_point_off = -2,
175 parameters_off = -1,
176 rbp_off = 0,
177 retaddr_off = 1,
178 parameter_size_off = 2,
179 thread_off = 3
180 #endif
181 };
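// For example, on Linux thread_off == 3, so the thread argument is read at
// rbp + 3 * wordSize == 24(rbp); on Windows it is at 72(rbp) (thread_off
// == 9). Both match the stack layouts pictured above.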
182
183 address generate_call_stub(address& return_address) {
184 assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
185 (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
186 "adjust this code");
187 StubCodeMark mark(this, "StubRoutines", "call_stub");
188 address start = __ pc();
189
190 // same as in generate_catch_exception()!
191 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
192
193 const Address call_wrapper (rbp, call_wrapper_off * wordSize);
194 const Address result (rbp, result_off * wordSize);
195 const Address result_type (rbp, result_type_off * wordSize);
196 const Address method (rbp, method_off * wordSize);
197 const Address entry_point (rbp, entry_point_off * wordSize);
198 const Address parameters (rbp, parameters_off * wordSize);
199 const Address parameter_size(rbp, parameter_size_off * wordSize);
200
201 // same as in generate_catch_exception()!
202 const Address thread (rbp, thread_off * wordSize);
203
204 const Address r15_save(rbp, r15_off * wordSize);
205 const Address r14_save(rbp, r14_off * wordSize);
206 const Address r13_save(rbp, r13_off * wordSize);
207 const Address r12_save(rbp, r12_off * wordSize);
208 const Address rbx_save(rbp, rbx_off * wordSize);
209
210 // stub code
211 __ enter();
212 __ subq(rsp, -rsp_after_call_off * wordSize);
213
214 // save register parameters
215 #ifndef _WIN64
216 __ movq(parameters, c_rarg5); // parameters
217 __ movq(entry_point, c_rarg4); // entry_point
218 #endif
219
220 __ movq(method, c_rarg3); // method
221 __ movl(result_type, c_rarg2); // result type
222 __ movq(result, c_rarg1); // result
223 __ movq(call_wrapper, c_rarg0); // call wrapper
224
225 // save regs belonging to calling function
226 __ movq(rbx_save, rbx);
227 __ movq(r12_save, r12);
228 __ movq(r13_save, r13);
229 __ movq(r14_save, r14);
230 __ movq(r15_save, r15);
231
232 #ifdef _WIN64
233 const Address rdi_save(rbp, rdi_off * wordSize);
234 const Address rsi_save(rbp, rsi_off * wordSize);
235
236 __ movq(rsi_save, rsi);
237 __ movq(rdi_save, rdi);
238 #else
239 const Address mxcsr_save(rbp, mxcsr_off * wordSize);
240 {
241 Label skip_ldmx;
242 __ stmxcsr(mxcsr_save);
243 __ movl(rax, mxcsr_save);
244 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
245 ExternalAddress mxcsr_std(StubRoutines::amd64::mxcsr_std());
246 __ cmp32(rax, mxcsr_std);
247 __ jcc(Assembler::equal, skip_ldmx);
248 __ ldmxcsr(mxcsr_std);
249 __ bind(skip_ldmx);
250 }
251 #endif
252
253 // Load up thread register
254 __ movq(r15_thread, thread);
255
256 #ifdef ASSERT
257 // make sure we have no pending exceptions
258 {
259 Label L;
260 __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
261 __ jcc(Assembler::equal, L);
262 __ stop("StubRoutines::call_stub: entered with pending exception");
263 __ bind(L);
264 }
265 #endif
266
267 // pass parameters if any
268 BLOCK_COMMENT("pass parameters if any");
269 Label parameters_done;
270 __ movl(c_rarg3, parameter_size);
271 __ testl(c_rarg3, c_rarg3);
272 __ jcc(Assembler::zero, parameters_done);
273
274 Label loop;
275 __ movq(c_rarg2, parameters); // parameter pointer
276 __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
277 __ BIND(loop);
278 if (TaggedStackInterpreter) {
279 __ movq(rax, Address(c_rarg2, 0)); // get tag
280 __ addq(c_rarg2, wordSize); // advance to next tag
281 __ pushq(rax); // pass tag
282 }
283 __ movq(rax, Address(c_rarg2, 0)); // get parameter
284 __ addq(c_rarg2, wordSize); // advance to next parameter
285 __ decrementl(c_rarg1); // decrement counter
286 __ pushq(rax); // pass parameter
287 __ jcc(Assembler::notZero, loop);
288
289 // call Java function
290 __ BIND(parameters_done);
291 __ movq(rbx, method); // get methodOop
292 __ movq(c_rarg1, entry_point); // get entry_point
293 __ movq(r13, rsp); // set sender sp
294 BLOCK_COMMENT("call Java function");
295 __ call(c_rarg1);
296
297 BLOCK_COMMENT("call_stub_return_address:");
298 return_address = __ pc();
299
300 // store result depending on type (everything that is not
301 // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
302 __ movq(c_rarg0, result);
303 Label is_long, is_float, is_double, exit;
304 __ movl(c_rarg1, result_type);
305 __ cmpl(c_rarg1, T_OBJECT);
306 __ jcc(Assembler::equal, is_long);
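// (T_OBJECT results are pointer-sized, so they share the 64-bit store
// at is_long)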
307 __ cmpl(c_rarg1, T_LONG);
308 __ jcc(Assembler::equal, is_long);
309 __ cmpl(c_rarg1, T_FLOAT);
310 __ jcc(Assembler::equal, is_float);
311 __ cmpl(c_rarg1, T_DOUBLE);
312 __ jcc(Assembler::equal, is_double);
313
314 // handle T_INT case
315 __ movl(Address(c_rarg0, 0), rax);
316
317 __ BIND(exit);
318
319 // pop parameters
320 __ leaq(rsp, rsp_after_call);
321
322 #ifdef ASSERT
323 // verify that threads correspond
324 {
325 Label L, S;
326 __ cmpq(r15_thread, thread);
327 __ jcc(Assembler::notEqual, S);
328 __ get_thread(rbx);
329 __ cmpq(r15_thread, rbx);
330 __ jcc(Assembler::equal, L);
331 __ bind(S);
332 __ jcc(Assembler::equal, L);
333 __ stop("StubRoutines::call_stub: threads must correspond");
334 __ bind(L);
335 }
336 #endif
337
338 // restore regs belonging to calling function
339 __ movq(r15, r15_save);
340 __ movq(r14, r14_save);
341 __ movq(r13, r13_save);
342 __ movq(r12, r12_save);
343 __ movq(rbx, rbx_save);
344
345 #ifdef _WIN64
346 __ movq(rdi, rdi_save);
347 __ movq(rsi, rsi_save);
348 #else
349 __ ldmxcsr(mxcsr_save);
350 #endif
351
352 // restore rsp
353 __ addq(rsp, -rsp_after_call_off * wordSize);
354
355 // return
356 __ popq(rbp);
357 __ ret(0);
358
359 // handle return types different from T_INT
360 __ BIND(is_long);
361 __ movq(Address(c_rarg0, 0), rax);
362 __ jmp(exit);
363
364 __ BIND(is_float);
365 __ movflt(Address(c_rarg0, 0), xmm0);
366 __ jmp(exit);
367
368 __ BIND(is_double);
369 __ movdbl(Address(c_rarg0, 0), xmm0);
370 __ jmp(exit);
371
372 return start;
373 }
374
375 // Return point for a Java call if there's an exception thrown in
376 // Java code. The exception is caught and transformed into a
377 // pending exception stored in JavaThread that can be tested from
378 // within the VM.
379 //
380 // Note: Usually the parameters are removed by the callee. In case
381 // of an exception crossing an activation frame boundary, that is
382 // not the case if the callee is compiled code => need to setup the
383 // rsp.
384 //
385 // rax: exception oop
386
387 address generate_catch_exception() {
388 StubCodeMark mark(this, "StubRoutines", "catch_exception");
389 address start = __ pc();
390
391 // same as in generate_call_stub():
392 const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);
393 const Address thread (rbp, thread_off * wordSize);
394
395 #ifdef ASSERT
396 // verify that threads correspond
397 {
398 Label L, S;
399 __ cmpq(r15_thread, thread);
400 __ jcc(Assembler::notEqual, S);
401 __ get_thread(rbx);
402 __ cmpq(r15_thread, rbx);
403 __ jcc(Assembler::equal, L);
404 __ bind(S);
405 __ stop("StubRoutines::catch_exception: threads must correspond");
406 __ bind(L);
407 }
408 #endif
409
410 // set pending exception
411 __ verify_oop(rax);
412
413 __ movq(Address(r15_thread, Thread::pending_exception_offset()), rax);
414 __ lea(rscratch1, ExternalAddress((address)__FILE__));
415 __ movq(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
416 __ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);
417
418 // complete return to VM
419 assert(StubRoutines::_call_stub_return_address != NULL,
420 "_call_stub_return_address must have been generated before");
421 __ jump(RuntimeAddress(StubRoutines::_call_stub_return_address));
422
423 return start;
424 }
425
426 // Continuation point for runtime calls returning with a pending
427 // exception. The pending exception check happened in the runtime
428 // or native call stub. The pending exception in Thread is
429 // converted into a Java-level exception.
430 //
431 // Contract with Java-level exception handlers:
432 // rax: exception
433 // rdx: throwing pc
434 //
435 // NOTE: At entry of this stub, exception-pc must be on stack !!
436
437 address generate_forward_exception() {
438 StubCodeMark mark(this, "StubRoutines", "forward exception");
439 address start = __ pc();
440
441 // Upon entry, the sp points to the return address returning into
442 // Java (interpreted or compiled) code; i.e., the return address
443 // becomes the throwing pc.
444 //
445 // Arguments pushed before the runtime call are still on the stack
446 // but the exception handler will reset the stack pointer ->
447 // ignore them. A potential result in registers can be ignored as
448 // well.
449
450 #ifdef ASSERT
451 // make sure this code is only executed if there is a pending exception
452 {
453 Label L;
454 __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
455 __ jcc(Assembler::notEqual, L);
456 __ stop("StubRoutines::forward exception: no pending exception (1)");
457 __ bind(L);
458 }
459 #endif
460
461 // compute exception handler into rbx
462 __ movq(c_rarg0, Address(rsp, 0));
463 BLOCK_COMMENT("call exception_handler_for_return_address");
464 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
465 SharedRuntime::exception_handler_for_return_address),
466 c_rarg0);
467 __ movq(rbx, rax);
468
469 // setup rax & rdx, remove return address & clear pending exception
470 __ popq(rdx);
471 __ movq(rax, Address(r15_thread, Thread::pending_exception_offset()));
472 __ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
473
474 #ifdef ASSERT
475 // make sure exception is set
476 {
477 Label L;
478 __ testq(rax, rax);
479 __ jcc(Assembler::notEqual, L);
480 __ stop("StubRoutines::forward exception: no pending exception (2)");
481 __ bind(L);
482 }
483 #endif
484
485 // continue at exception handler (return address removed)
486 // rax: exception
487 // rbx: exception handler
488 // rdx: throwing pc
489 __ verify_oop(rax);
490 __ jmp(rbx);
491
492 return start;
493 }
494
495 // Support for jint atomic::xchg(jint exchange_value, volatile jint* dest)
496 //
497 // Arguments :
498 // c_rarg0: exchange_value
499 // c_rarg1: dest
500 //
501 // Result:
502 // *dest <- exchange_value, return the previous *dest
503 address generate_atomic_xchg() {
504 StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
505 address start = __ pc();
506
507 __ movl(rax, c_rarg0); // Copy to eax; we need a return value anyhow
508 __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK
509 __ ret(0);
510
511 return start;
512 }
513
514 // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
515 //
516 // Arguments :
517 // c_rarg0: exchange_value
518 // c_rarg1: dest
519 //
520 // Result:
521 // *dest <- exchange_value, return the previous *dest
522 address generate_atomic_xchg_ptr() {
523 StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
524 address start = __ pc();
525
526 __ movq(rax, c_rarg0); // Copy to rax; we need a return value anyhow
527 __ xchgq(rax, Address(c_rarg1, 0)); // automatic LOCK
528 __ ret(0);
529
530 return start;
531 }
532
533 // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest,
534 // jint compare_value)
535 //
536 // Arguments :
537 // c_rarg0: exchange_value
538 // c_rarg1: dest
539 // c_rarg2: compare_value
540 //
541 // Result:
542 //    old = *dest;
543 //    if (compare_value == old)
544 //       *dest = exchange_value;
545 //    return old;   // i.e. compare_value on success,
546 //                  // the unchanged *dest on failure
547 address generate_atomic_cmpxchg() {
548 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
549 address start = __ pc();
550
551 __ movl(rax, c_rarg2);
552 if ( os::is_MP() ) __ lock();
553 __ cmpxchgl(c_rarg0, Address(c_rarg1, 0));
554 __ ret(0);
555
556 return start;
557 }
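// (cmpxchg compares rax with the memory operand: on a match it stores
// c_rarg0 there, otherwise it loads the memory value into rax. Either
// way rax ends up holding the original *dest, which is exactly the
// contract described above.)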
558
559 // Support for jlong atomic::atomic_cmpxchg_long(jlong exchange_value,
560 // volatile jlong* dest,
561 // jlong compare_value)
562 // Arguments :
563 // c_rarg0: exchange_value
564 // c_rarg1: dest
565 // c_rarg2: compare_value
566 //
567 // Result:
568 //    old = *dest;
569 //    if (compare_value == old)
570 //       *dest = exchange_value;
571 //    return old;   // i.e. compare_value on success,
572 //                  // the unchanged *dest on failure
573 address generate_atomic_cmpxchg_long() {
574 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
575 address start = __ pc();
576
577 __ movq(rax, c_rarg2);
578 if ( os::is_MP() ) __ lock();
579 __ cmpxchgq(c_rarg0, Address(c_rarg1, 0));
580 __ ret(0);
581
582 return start;
583 }
584
585 // Support for jint atomic::add(jint add_value, volatile jint* dest)
586 //
587 // Arguments :
588 // c_rarg0: add_value
589 // c_rarg1: dest
590 //
591 // Result:
592 // *dest += add_value
593 // return *dest;
594 address generate_atomic_add() {
595 StubCodeMark mark(this, "StubRoutines", "atomic_add");
596 address start = __ pc();
597
598 __ movl(rax, c_rarg0);
599 if ( os::is_MP() ) __ lock();
600 __ xaddl(Address(c_rarg1, 0), c_rarg0);
601 __ addl(rax, c_rarg0);
602 __ ret(0);
603
604 return start;
605 }
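// (xadd atomically stores old + add_value back to memory and leaves the
// old value in c_rarg0; adding it to rax, which still holds add_value,
// reconstructs the new value, satisfying the "return *dest" contract.)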
606
607 // Support for intptr_t atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
608 //
609 // Arguments :
610 // c_rarg0: add_value
611 // c_rarg1: dest
612 //
613 // Result:
614 // *dest += add_value
615 // return *dest;
616 address generate_atomic_add_ptr() {
617 StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
618 address start = __ pc();
619
620 __ movq(rax, c_rarg0); // Copy to rax; we need a return value anyhow
621 if ( os::is_MP() ) __ lock();
622 __ xaddq(Address(c_rarg1, 0), c_rarg0); // 64-bit ops: dest is an intptr_t*
623 __ addq(rax, c_rarg0);
624 __ ret(0);
625
626 return start;
627 }
628
629 // Support for void OrderAccess::fence()
630 //
631 // Arguments :
632 //
633 // Result:
634 address generate_orderaccess_fence() {
635 StubCodeMark mark(this, "StubRoutines", "orderaccess_fence");
636 address start = __ pc();
637 __ mfence();
638 __ ret(0);
639
640 return start;
641 }
642
643 // Support for intptr_t get_previous_fp()
644 //
645 // This routine is used to find the previous frame pointer for the
646 // caller (current_frame_guess). This is used as part of debugging
647 // when ps() is seemingly lost trying to find frames.
648 // This code assumes that the caller (current_frame_guess) has a frame.
649 address generate_get_previous_fp() {
650 StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
651 const Address old_fp(rbp, 0);
652 const Address older_fp(rax, 0);
653 address start = __ pc();
654
655 __ enter();
656 __ movq(rax, old_fp); // callers fp
657 __ movq(rax, older_fp); // the frame for ps()
658 __ popq(rbp);
659 __ ret(0);
660
661 return start;
662 }
663
664 //----------------------------------------------------------------------------------------------------
665 // Support for void verify_mxcsr()
666 //
667 // This routine is used with -Xcheck:jni to verify that native
668 // JNI code does not return to Java code without restoring the
669 // MXCSR register to our expected state.
670
671 address generate_verify_mxcsr() {
672 StubCodeMark mark(this, "StubRoutines", "verify_mxcsr");
673 address start = __ pc();
674
675 const Address mxcsr_save(rsp, 0);
676
677 if (CheckJNICalls) {
678 Label ok_ret;
679 __ pushq(rax);
680 __ subq(rsp, wordSize); // allocate a temp location
681 __ stmxcsr(mxcsr_save);
682 __ movl(rax, mxcsr_save);
683 __ andl(rax, MXCSR_MASK); // Only check control and mask bits
684 __ cmpl(rax, *(int *)(StubRoutines::amd64::mxcsr_std()));
685 __ jcc(Assembler::equal, ok_ret);
686
687 __ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
688
689 __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
690
691 __ bind(ok_ret);
692 __ addq(rsp, wordSize);
693 __ popq(rax);
694 }
695
696 __ ret(0);
697
698 return start;
699 }
700
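// The following *_fixup routines are slow paths entered after cvttss2si /
// cvttsd2si produce the "integer indefinite" value (min_jint/min_jlong):
// they re-examine the original float/double bit pattern and store 0 for
// NaN inputs, or the min/max of the target type for out-of-range inputs,
// matching the Java semantics of (int) and (long) casts.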
701 address generate_f2i_fixup() {
702 StubCodeMark mark(this, "StubRoutines", "f2i_fixup");
703 Address inout(rsp, 5 * wordSize); // return address + 4 saves
704
705 address start = __ pc();
706
707 Label L;
708
709 __ pushq(rax);
710 __ pushq(c_rarg3);
711 __ pushq(c_rarg2);
712 __ pushq(c_rarg1);
713
714 __ movl(rax, 0x7f800000);
715 __ xorl(c_rarg3, c_rarg3);
716 __ movl(c_rarg2, inout);
717 __ movl(c_rarg1, c_rarg2);
718 __ andl(c_rarg1, 0x7fffffff);
719 __ cmpl(rax, c_rarg1); // NaN? -> 0
720 __ jcc(Assembler::negative, L);
721 __ testl(c_rarg2, c_rarg2); // signed ? min_jint : max_jint
722 __ movl(c_rarg3, 0x80000000);
723 __ movl(rax, 0x7fffffff);
724 __ cmovl(Assembler::positive, c_rarg3, rax);
725
726 __ bind(L);
727 __ movq(inout, c_rarg3);
728
729 __ popq(c_rarg1);
730 __ popq(c_rarg2);
731 __ popq(c_rarg3);
732 __ popq(rax);
733
734 __ ret(0);
735
736 return start;
737 }
738
739 address generate_f2l_fixup() {
740 StubCodeMark mark(this, "StubRoutines", "f2l_fixup");
741 Address inout(rsp, 5 * wordSize); // return address + 4 saves
742 address start = __ pc();
743
744 Label L;
745
746 __ pushq(rax);
747 __ pushq(c_rarg3);
748 __ pushq(c_rarg2);
749 __ pushq(c_rarg1);
750
751 __ movl(rax, 0x7f800000);
752 __ xorl(c_rarg3, c_rarg3);
753 __ movl(c_rarg2, inout);
754 __ movl(c_rarg1, c_rarg2);
755 __ andl(c_rarg1, 0x7fffffff);
756 __ cmpl(rax, c_rarg1); // NaN? -> 0
757 __ jcc(Assembler::negative, L);
758 __ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
759 __ mov64(c_rarg3, 0x8000000000000000);
760 __ mov64(rax, 0x7fffffffffffffff);
761 __ cmovq(Assembler::positive, c_rarg3, rax);
762
763 __ bind(L);
764 __ movq(inout, c_rarg3);
765
766 __ popq(c_rarg1);
767 __ popq(c_rarg2);
768 __ popq(c_rarg3);
769 __ popq(rax);
770
771 __ ret(0);
772
773 return start;
774 }
775
776 address generate_d2i_fixup() {
777 StubCodeMark mark(this, "StubRoutines", "d2i_fixup");
778 Address inout(rsp, 6 * wordSize); // return address + 5 saves
779
780 address start = __ pc();
781
782 Label L;
783
784 __ pushq(rax);
785 __ pushq(c_rarg3);
786 __ pushq(c_rarg2);
787 __ pushq(c_rarg1);
788 __ pushq(c_rarg0);
789
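// Classify the input: build c_rarg1 = (high dword & 0x7fffffff), with
// bit 0 forced to 1 if the low dword is non-zero. The result exceeds
// 0x7ff00000 (the high dword of +inf) exactly for NaNs, which map to 0
// below; any other out-of-range value maps to min_jint or max_jint
// according to the sign of the original double.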
790 __ movl(rax, 0x7ff00000);
791 __ movq(c_rarg2, inout);
792 __ movl(c_rarg3, c_rarg2);
793 __ movq(c_rarg1, c_rarg2);
794 __ movq(c_rarg0, c_rarg2);
795 __ negl(c_rarg3);
796 __ shrq(c_rarg1, 0x20);
797 __ orl(c_rarg3, c_rarg2);
798 __ andl(c_rarg1, 0x7fffffff);
799 __ xorl(c_rarg2, c_rarg2);
800 __ shrl(c_rarg3, 0x1f);
801 __ orl(c_rarg1, c_rarg3);
802 __ cmpl(rax, c_rarg1);
803 __ jcc(Assembler::negative, L); // NaN -> 0
804 __ testq(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
805 __ movl(c_rarg2, 0x80000000);
806 __ movl(rax, 0x7fffffff);
807 __ cmovl(Assembler::positive, c_rarg2, rax);
808
809 __ bind(L);
810 __ movq(inout, c_rarg2);
811
812 __ popq(c_rarg0);
813 __ popq(c_rarg1);
814 __ popq(c_rarg2);
815 __ popq(c_rarg3);
816 __ popq(rax);
817
818 __ ret(0);
819
820 return start;
821 }
822
823 address generate_d2l_fixup() {
824 StubCodeMark mark(this, "StubRoutines", "d2l_fixup");
825 Address inout(rsp, 6 * wordSize); // return address + 5 saves
826
827 address start = __ pc();
828
829 Label L;
830
831 __ pushq(rax);
832 __ pushq(c_rarg3);
833 __ pushq(c_rarg2);
834 __ pushq(c_rarg1);
835 __ pushq(c_rarg0);
836
837 __ movl(rax, 0x7ff00000);
838 __ movq(c_rarg2, inout);
839 __ movl(c_rarg3, c_rarg2);
840 __ movq(c_rarg1, c_rarg2);
841 __ movq(c_rarg0, c_rarg2);
842 __ negl(c_rarg3);
843 __ shrq(c_rarg1, 0x20);
844 __ orl(c_rarg3, c_rarg2);
845 __ andl(c_rarg1, 0x7fffffff);
846 __ xorl(c_rarg2, c_rarg2);
847 __ shrl(c_rarg3, 0x1f);
848 __ orl(c_rarg1, c_rarg3);
849 __ cmpl(rax, c_rarg1);
850 __ jcc(Assembler::negative, L); // NaN -> 0
851 __ testq(c_rarg0, c_rarg0); // signed ? min_jlong : max_jlong
852 __ mov64(c_rarg2, 0x8000000000000000);
853 __ mov64(rax, 0x7fffffffffffffff);
854 __ cmovq(Assembler::positive, c_rarg2, rax);
855
856 __ bind(L);
857 __ movq(inout, c_rarg2);
858
859 __ popq(c_rarg0);
860 __ popq(c_rarg1);
861 __ popq(c_rarg2);
862 __ popq(c_rarg3);
863 __ popq(rax);
864
865 __ ret(0);
866
867 return start;
868 }
869
870 address generate_fp_mask(const char *stub_name, int64_t mask) {
871 StubCodeMark mark(this, "StubRoutines", stub_name);
872
873 __ align(16);
874 address start = __ pc();
875
876 __ emit_data64( mask, relocInfo::none );
877 __ emit_data64( mask, relocInfo::none );
878
879 return start;
880 }
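// (The mask is emitted twice so that the 16-byte-aligned constant can be
// used directly as a 128-bit memory operand by SSE instructions, e.g.
// for sign-bit manipulation of floats and doubles.)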
881
882 // The following routine generates a subroutine to throw an
883 // asynchronous UnknownError when an unsafe access gets a fault that
884 // could not be reasonably prevented by the programmer. (Example:
885 // SIGBUS/OBJERR.)
886 address generate_handler_for_unsafe_access() {
887 StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
888 address start = __ pc();
889
890 __ pushq(0); // hole for return address-to-be
891 __ pushaq(); // push registers
892 Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
893
894 __ subq(rsp, frame::arg_reg_save_area_bytes);
895 BLOCK_COMMENT("call handle_unsafe_access");
896 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
897 __ addq(rsp, frame::arg_reg_save_area_bytes);
898
899 __ movq(next_pc, rax); // stuff next address
900 __ popaq();
901 __ ret(0); // jump to next address
902
903 return start;
904 }
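// (The stub works by reserving a stack slot with pushq(0), saving all
// registers above it, letting handle_unsafe_access() compute the address
// of the instruction after the faulting one, storing that address into
// the reserved slot, and then using ret to resume execution there.)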
905
906 // Non-destructive plausibility checks for oops
907 //
908 // Arguments:
909 // all args on stack!
910 //
911 // Stack after saving c_rarg3:
912 // [tos + 0]: saved c_rarg3
913 // [tos + 1]: saved c_rarg2
914 // [tos + 2]: saved flags
915 // [tos + 3]: return address
916 // * [tos + 4]: error message (char*)
917 // * [tos + 5]: object to verify (oop)
918 // * [tos + 6]: saved rax - saved by caller and bashed
919 // * = popped on exit
920 address generate_verify_oop() {
921 StubCodeMark mark(this, "StubRoutines", "verify_oop");
922 address start = __ pc();
923
924 Label exit, error;
925
926 __ pushfq();
927 __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
928
929 // save c_rarg2 and c_rarg3
930 __ pushq(c_rarg2);
931 __ pushq(c_rarg3);
932
933 // get object
934 __ movq(rax, Address(rsp, 5 * wordSize));
935
936 // make sure object is 'reasonable'
937 __ testq(rax, rax);
938 __ jcc(Assembler::zero, exit); // if obj is NULL it is OK
939 // Check if the oop is in the right area of memory
940 __ movq(c_rarg2, rax);
941 __ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask());
942 __ andq(c_rarg2, c_rarg3);
943 __ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits());
944 __ cmpq(c_rarg2, c_rarg3);
945 __ jcc(Assembler::notZero, error);
946
947 // make sure klass is 'reasonable'
948 __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
949 __ testq(rax, rax);
950 __ jcc(Assembler::zero, error); // if klass is NULL it is broken
951 // Check if the klass is in the right area of memory
952 __ movq(c_rarg2, rax);
953 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
954 __ andq(c_rarg2, c_rarg3);
955 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
956 __ cmpq(c_rarg2, c_rarg3);
957 __ jcc(Assembler::notZero, error);
958
959 // make sure klass' klass is 'reasonable'
960 __ movq(rax, Address(rax, oopDesc::klass_offset_in_bytes()));
961 __ testq(rax, rax);
962 __ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
963 // Check if the klass' klass is in the right area of memory
964 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
965 __ andq(rax, c_rarg3);
966 __ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
967 __ cmpq(rax, c_rarg3);
968 __ jcc(Assembler::notZero, error);
969
970 // return if everything seems ok
971 __ bind(exit);
972 __ movq(rax, Address(rsp, 6 * wordSize)); // get saved rax back
973 __ popq(c_rarg3); // restore c_rarg3
974 __ popq(c_rarg2); // restore c_rarg2
975 __ popfq(); // restore flags
976 __ ret(3 * wordSize); // pop caller saved stuff
977
978 // handle errors
979 __ bind(error);
980 __ movq(rax, Address(rsp, 6 * wordSize)); // get saved rax back
981 __ popq(c_rarg3); // get saved c_rarg3 back
982 __ popq(c_rarg2); // get saved c_rarg2 back
983 __ popfq(); // get saved flags off stack --
984 // will be ignored
985
986 __ pushaq(); // push registers
987 // (rip is already
988 // pushed)
989 // debug(char* msg, int64_t regs[])
990 // We've popped the registers we'd saved (c_rarg3, c_rarg2 and flags), and
991 // pushed all the registers, so now the stack looks like:
992 // [tos + 0] 16 saved registers
993 // [tos + 16] return address
994 // [tos + 17] error message (char*)
995
996 __ movq(c_rarg0, Address(rsp, 17 * wordSize)); // pass address of error message
997 __ movq(c_rarg1, rsp); // pass address of regs on stack
998 __ movq(r12, rsp); // remember rsp
999 __ subq(rsp, frame::arg_reg_save_area_bytes);// windows
1000 __ andq(rsp, -16); // align stack as required by ABI
1001 BLOCK_COMMENT("call MacroAssembler::debug");
1002 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
1003 __ movq(rsp, r12); // restore rsp
1004 __ popaq(); // pop registers
1005 __ ret(3 * wordSize); // pop caller saved stuff
1006
1007 return start;
1008 }
1009
1010 static address disjoint_byte_copy_entry;
1011 static address disjoint_short_copy_entry;
1012 static address disjoint_int_copy_entry;
1013 static address disjoint_long_copy_entry;
1014 static address disjoint_oop_copy_entry;
1015
1016 static address byte_copy_entry;
1017 static address short_copy_entry;
1018 static address int_copy_entry;
1019 static address long_copy_entry;
1020 static address oop_copy_entry;
1021
1022 static address checkcast_copy_entry;
1023
1024 //
1025 // Verify that a register contains a clean 32-bit positive value
1026 // (the high 32 bits are 0) so it can be used in 64-bit shifts.
1027 //
1028 // Input:
1029 // Rint - 32-bits value
1030 // Rtmp - scratch
1031 //
1032 void assert_clean_int(Register Rint, Register Rtmp) {
1033 #ifdef ASSERT
1034 Label L;
1035 assert_different_registers(Rtmp, Rint);
1036 __ movslq(Rtmp, Rint);
1037 __ cmpq(Rtmp, Rint);
1038 __ jccb(Assembler::equal, L);
1039 __ stop("high 32-bits of int value are not 0");
1040 __ bind(L);
1041 #endif
1042 }
1043
1044 // Generate overlap test for array copy stubs
1045 //
1046 // Input:
1047 // c_rarg0 - from
1048 // c_rarg1 - to
1049 // c_rarg2 - element count
1050 //
1051 // Output:
1052 // rax - &from[element count - 1]
1053 //
1054 void array_overlap_test(address no_overlap_target, Address::ScaleFactor sf) {
1055 assert(no_overlap_target != NULL, "must be generated");
1056 array_overlap_test(no_overlap_target, NULL, sf);
1057 }
1058 void array_overlap_test(Label& L_no_overlap, Address::ScaleFactor sf) {
1059 array_overlap_test(NULL, &L_no_overlap, sf);
1060 }
1061 void array_overlap_test(address no_overlap_target, Label* NOLp, Address::ScaleFactor sf) {
1062 const Register from = c_rarg0;
1063 const Register to = c_rarg1;
1064 const Register count = c_rarg2;
1065 const Register end_from = rax;
1066
1067 __ cmpq(to, from);
1068 __ leaq(end_from, Address(from, count, sf, 0));
1069 if (NOLp == NULL) {
1070 ExternalAddress no_overlap(no_overlap_target);
1071 __ jump_cc(Assembler::belowEqual, no_overlap);
1072 __ cmpq(to, end_from);
1073 __ jump_cc(Assembler::aboveEqual, no_overlap);
1074 } else {
1075 __ jcc(Assembler::belowEqual, (*NOLp));
1076 __ cmpq(to, end_from);
1077 __ jcc(Assembler::aboveEqual, (*NOLp));
1078 }
1079 }
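// (A backward, conjoint copy is only required when
// from < to < from + count*elem_size; the two unsigned branches above
// dispatch every other case to the forward, disjoint copy and otherwise
// fall through into the conjoint code that follows the test.)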
1080
1081 // Shuffle the first three (or four, if nargs == 4) arg regs on Windows into Linux/Solaris locations.
1082 //
1083 // Outputs:
1084 // rdi - rcx
1085 // rsi - rdx
1086 // rdx - r8
1087 // rcx - r9
1088 //
1089 // On Windows, rdi and rsi are non-volatile, so they are saved in r9 and
1090 // r10; the caller must therefore not expect r9 and r10 to be preserved.
1091 //
1092 void setup_arg_regs(int nargs = 3) {
1093 const Register saved_rdi = r9;
1094 const Register saved_rsi = r10;
1095 assert(nargs == 3 || nargs == 4, "else fix");
1096 #ifdef _WIN64
1097 assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
1098 "unexpected argument registers");
1099 if (nargs >= 4)
1100 __ movq(rax, r9); // r9 is also saved_rdi
1101 __ movq(saved_rdi, rdi);
1102 __ movq(saved_rsi, rsi);
1103 __ movq(rdi, rcx); // c_rarg0
1104 __ movq(rsi, rdx); // c_rarg1
1105 __ movq(rdx, r8); // c_rarg2
1106 if (nargs >= 4)
1107 __ movq(rcx, rax); // c_rarg3 (via rax)
1108 #else
1109 assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
1110 "unexpected argument registers");
1111 #endif
1112 }
1113
1114 void restore_arg_regs() {
1115 const Register saved_rdi = r9;
1116 const Register saved_rsi = r10;
1117 #ifdef _WIN64
1118 __ movq(rdi, saved_rdi);
1119 __ movq(rsi, saved_rsi);
1120 #endif
1121 }
1122
1123 // Generate code for an array write pre barrier
1124 //
1125 // addr - starting address
1126 // count - element count
1127 //
1128 // Destroy no registers!
1129 //
1130 void gen_write_ref_array_pre_barrier(Register addr, Register count) {
1131 #if 0 // G1 - only
1132 assert_different_registers(addr, c_rarg1);
1133 assert_different_registers(count, c_rarg0);
1134 BarrierSet* bs = Universe::heap()->barrier_set();
1135 switch (bs->kind()) {
1136 case BarrierSet::G1SATBCT:
1137 case BarrierSet::G1SATBCTLogging:
1138 {
1139 __ pushaq(); // push registers
1140 __ movq(c_rarg0, addr);
1141 __ movq(c_rarg1, count);
1142 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
1143 __ popaq();
1144 }
1145 break;
1146 case BarrierSet::CardTableModRef:
1147 case BarrierSet::CardTableExtension:
1148 case BarrierSet::ModRef:
1149 break;
1150 default :
1151 ShouldNotReachHere();
1152
1153 }
1154 #endif // 0 G1 - only
1155 }
1156
1157 //
1158 // Generate code for an array write post barrier
1159 //
1160 // Input:
1161 // start - register containing starting address of destination array
1162 // end - register containing ending address of destination array
1163 // scratch - scratch register
1164 //
1165 // The input registers are overwritten.
1166 // The ending address is inclusive.
1167 void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch) {
1168 assert_different_registers(start, end, scratch);
1169 BarrierSet* bs = Universe::heap()->barrier_set();
1170 switch (bs->kind()) {
1171 #if 0 // G1 - only
1172 case BarrierSet::G1SATBCT:
1173 case BarrierSet::G1SATBCTLogging:
1174
1175 {
1176 __ pushaq(); // push registers (overkill)
1177 // must compute element count unless barrier set interface is changed (other platforms supply count)
1178 assert_different_registers(start, end, scratch);
1179 __ leaq(scratch, Address(end, wordSize));
1180 __ subq(scratch, start);
1181 __ shrq(scratch, LogBytesPerWord);
1182 __ movq(c_rarg0, start);
1183 __ movq(c_rarg1, scratch);
1184 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
1185 __ popaq();
1186 }
1187 break;
1188 #endif // 0 G1 - only
1189 case BarrierSet::CardTableModRef:
1190 case BarrierSet::CardTableExtension:
1191 {
1192 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1193 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1194
1195 Label L_loop;
1196
1197 __ shrq(start, CardTableModRefBS::card_shift);
1198 __ shrq(end, CardTableModRefBS::card_shift);
1199 __ subq(end, start); // number of bytes to copy
1200
1201 const Register count = end; // 'end' register contains bytes count now
1202 __ lea(scratch, ExternalAddress((address)ct->byte_map_base));
1203 __ addq(start, scratch);
1204 __ BIND(L_loop);
1205 __ movb(Address(start, count, Address::times_1), 0);
1206 __ decrementq(count);
1207 __ jcc(Assembler::greaterEqual, L_loop);
1208 }
1209 }
1210 }
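// (Each card-table byte covers 2^card_shift bytes of heap -- 512 bytes
// with the usual card_shift of 9. The loop above walks the cards spanned
// by [start, end] from the last one down to the first and dirties each by
// storing 0, recording that the covered words may contain updated oops.)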
1211
1212 // Copy big chunks forward
1213 //
1214 // Inputs:
1215 // end_from - source array's end address
1216 // end_to - destination array's end address
1217 // qword_count - 64-bit element count, negative
1218 // to - scratch
1219 // L_copy_32_bytes - entry label
1220 // L_copy_8_bytes - exit label
1221 //
1222 void copy_32_bytes_forward(Register end_from, Register end_to,
1223 Register qword_count, Register to,
1224 Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1225 DEBUG_ONLY(__ stop("enter at entry label, not here"));
1226 Label L_loop;
1227 __ align(16);
1228 __ BIND(L_loop);
1229 __ movq(to, Address(end_from, qword_count, Address::times_8, -24));
1230 __ movq(Address(end_to, qword_count, Address::times_8, -24), to);
1231 __ movq(to, Address(end_from, qword_count, Address::times_8, -16));
1232 __ movq(Address(end_to, qword_count, Address::times_8, -16), to);
1233 __ movq(to, Address(end_from, qword_count, Address::times_8, - 8));
1234 __ movq(Address(end_to, qword_count, Address::times_8, - 8), to);
1235 __ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
1236 __ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
1237 __ BIND(L_copy_32_bytes);
1238 __ addq(qword_count, 4);
1239 __ jcc(Assembler::lessEqual, L_loop);
1240 __ subq(qword_count, 4);
1241 __ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
1242 }
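// (qword_count is negative on entry: the loop copies four qwords per
// iteration at offsets -24..0 from the inclusive end pointers for as long
// as qword_count + 4 <= 0, and the final subq restores the remaining
// negative count for the trailing-qword loop at L_copy_8_bytes.)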
1243
1244
1245 // Copy big chunks backward
1246 //
1247 // Inputs:
1248 // from - source array's address
1249 // dest - destination array's address
1250 // qword_count - 64-bit element count
1251 // to - scratch
1252 // L_copy_32_bytes - entry label
1253 // L_copy_8_bytes - exit label
1254 //
1255 void copy_32_bytes_backward(Register from, Register dest,
1256 Register qword_count, Register to,
1257 Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
1258 DEBUG_ONLY(__ stop("enter at entry label, not here"));
1259 Label L_loop;
1260 __ align(16);
1261 __ BIND(L_loop);
1262 __ movq(to, Address(from, qword_count, Address::times_8, 24));
1263 __ movq(Address(dest, qword_count, Address::times_8, 24), to);
1264 __ movq(to, Address(from, qword_count, Address::times_8, 16));
1265 __ movq(Address(dest, qword_count, Address::times_8, 16), to);
1266 __ movq(to, Address(from, qword_count, Address::times_8, 8));
1267 __ movq(Address(dest, qword_count, Address::times_8, 8), to);
1268 __ movq(to, Address(from, qword_count, Address::times_8, 0));
1269 __ movq(Address(dest, qword_count, Address::times_8, 0), to);
1270 __ BIND(L_copy_32_bytes);
1271 __ subq(qword_count, 4);
1272 __ jcc(Assembler::greaterEqual, L_loop);
1273 __ addq(qword_count, 4);
1274 __ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
1275 }
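// (Mirror image of copy_32_bytes_forward: qword_count is positive and
// counts down, four qwords per iteration at offsets 0..24 from the base
// addresses, with any remainder again left to L_copy_8_bytes.)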
1276
1277
1278 // Arguments:
1279 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1280 //            (this flag is currently ignored)
1281 // name - stub name string
1282 //
1283 // Inputs:
1284 // c_rarg0 - source array address
1285 // c_rarg1 - destination array address
1286 // c_rarg2 - element count, treated as ssize_t, can be zero
1287 //
1288 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1289 // we let the hardware handle it. The one to eight bytes within words,
1290 // dwords or qwords that span cache line boundaries will still be loaded
1291 // and stored atomically.
1292 //
1293 // Side Effects:
1294 // disjoint_byte_copy_entry is set to the no-overlap entry point
1295 // used by generate_conjoint_byte_copy().
1296 //
1297 address generate_disjoint_byte_copy(bool aligned, const char *name) {
1298 __ align(CodeEntryAlignment);
1299 StubCodeMark mark(this, "StubRoutines", name);
1300 address start = __ pc();
1301
1302 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1303 Label L_copy_byte, L_exit;
1304 const Register from = rdi; // source array address
1305 const Register to = rsi; // destination array address
1306 const Register count = rdx; // elements count
1307 const Register byte_count = rcx;
1308 const Register qword_count = count;
1309 const Register end_from = from; // source array end address
1310 const Register end_to = to; // destination array end address
1311 // End pointers are inclusive, and if count is not zero they point
1312 // to the last unit copied: end_to[0] := end_from[0]
1313
1314 __ enter(); // required for proper stackwalking of RuntimeStub frame
1315 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1316
1317 disjoint_byte_copy_entry = __ pc();
1318 BLOCK_COMMENT("Entry:");
1319 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1320
1321 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1322 // r9 and r10 may be used to save non-volatile registers
1323
1324 // 'from', 'to' and 'count' are now valid
1325 __ movq(byte_count, count);
1326 __ shrq(count, 3); // count => qword_count
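// (e.g. for count == 15: qword_count becomes 1, so one qword is copied by
// the trailing-qword loop, and the byte_count & 4, & 2 and & 1 tests below
// then pick up the remaining dword, word and byte: 8+4+2+1 == 15 bytes.)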
1327
1328 // Copy from low to high addresses. Use 'to' as scratch.
1329 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
1330 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
1331 __ negq(qword_count); // make the count negative
1332 __ jmp(L_copy_32_bytes);
1333
1334 // Copy trailing qwords
1335 __ BIND(L_copy_8_bytes);
1336 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1337 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1338 __ incrementq(qword_count);
1339 __ jcc(Assembler::notZero, L_copy_8_bytes);
1340
1341 // Check for and copy trailing dword
1342 __ BIND(L_copy_4_bytes);
1343 __ testq(byte_count, 4);
1344 __ jccb(Assembler::zero, L_copy_2_bytes);
1345 __ movl(rax, Address(end_from, 8));
1346 __ movl(Address(end_to, 8), rax);
1347
1348 __ addq(end_from, 4);
1349 __ addq(end_to, 4);
1350
1351 // Check for and copy trailing word
1352 __ BIND(L_copy_2_bytes);
1353 __ testq(byte_count, 2);
1354 __ jccb(Assembler::zero, L_copy_byte);
1355 __ movw(rax, Address(end_from, 8));
1356 __ movw(Address(end_to, 8), rax);
1357
1358 __ addq(end_from, 2);
1359 __ addq(end_to, 2);
1360
1361 // Check for and copy trailing byte
1362 __ BIND(L_copy_byte);
1363 __ testq(byte_count, 1);
1364 __ jccb(Assembler::zero, L_exit);
1365 __ movb(rax, Address(end_from, 8));
1366 __ movb(Address(end_to, 8), rax);
1367
1368 __ BIND(L_exit);
1369 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1370 restore_arg_regs();
1371 __ xorq(rax, rax); // return 0
1372 __ leave(); // required for proper stackwalking of RuntimeStub frame
1373 __ ret(0);
1374
1375 // Copy in 32-byte chunks
1376 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1377 __ jmp(L_copy_4_bytes);
1378
1379 return start;
1380 }
1381
1382 // Arguments:
1383 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1384 //            (this flag is currently ignored)
1385 // name - stub name string
1386 //
1387 // Inputs:
1388 // c_rarg0 - source array address
1389 // c_rarg1 - destination array address
1390 // c_rarg2 - element count, treated as ssize_t, can be zero
1391 //
1392 // If 'from' and/or 'to' are aligned on 4-, 2-, or 1-byte boundaries,
1393 // we let the hardware handle it. The one to eight bytes within words,
1394 // dwords or qwords that span cache line boundaries will still be loaded
1395 // and stored atomically.
1396 //
1397 address generate_conjoint_byte_copy(bool aligned, const char *name) {
1398 __ align(CodeEntryAlignment);
1399 StubCodeMark mark(this, "StubRoutines", name);
1400 address start = __ pc();
1401
1402 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes;
1403 const Register from = rdi; // source array address
1404 const Register to = rsi; // destination array address
1405 const Register count = rdx; // elements count
1406 const Register byte_count = rcx;
1407 const Register qword_count = count;
1408
1409 __ enter(); // required for proper stackwalking of RuntimeStub frame
1410 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1411
1412 byte_copy_entry = __ pc();
1413 BLOCK_COMMENT("Entry:");
1414 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1415
1416 array_overlap_test(disjoint_byte_copy_entry, Address::times_1);
1417 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1418 // r9 and r10 may be used to save non-volatile registers
1419
1420 // 'from', 'to' and 'count' are now valid
1421 __ movq(byte_count, count);
1422 __ shrq(count, 3); // count => qword_count
1423
1424 // Copy from high to low addresses.
1425
1426 // Check for and copy trailing byte
1427 __ testq(byte_count, 1);
1428 __ jcc(Assembler::zero, L_copy_2_bytes);
1429 __ movb(rax, Address(from, byte_count, Address::times_1, -1));
1430 __ movb(Address(to, byte_count, Address::times_1, -1), rax);
1431 __ decrementq(byte_count); // Adjust for possible trailing word
1432
1433 // Check for and copy trailing word
1434 __ BIND(L_copy_2_bytes);
1435 __ testq(byte_count, 2);
1436 __ jcc(Assembler::zero, L_copy_4_bytes);
1437 __ movw(rax, Address(from, byte_count, Address::times_1, -2));
1438 __ movw(Address(to, byte_count, Address::times_1, -2), rax);
1439
1440 // Check for and copy trailing dword
1441 __ BIND(L_copy_4_bytes);
1442 __ testq(byte_count, 4);
1443 __ jcc(Assembler::zero, L_copy_32_bytes);
1444 __ movl(rax, Address(from, qword_count, Address::times_8));
1445 __ movl(Address(to, qword_count, Address::times_8), rax);
1446 __ jmp(L_copy_32_bytes);
1447
1448 // Copy trailing qwords
1449 __ BIND(L_copy_8_bytes);
1450 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1451 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1452 __ decrementq(qword_count);
1453 __ jcc(Assembler::notZero, L_copy_8_bytes);
1454
1455 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1456 restore_arg_regs();
1457 __ xorq(rax, rax); // return 0
1458 __ leave(); // required for proper stackwalking of RuntimeStub frame
1459 __ ret(0);
1460
1461 // Copy in 32-byte chunks
1462 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1463
1464 inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
1465 restore_arg_regs();
1466 __ xorq(rax, rax); // return 0
1467 __ leave(); // required for proper stackwalking of RuntimeStub frame
1468 __ ret(0);
1469
1470 return start;
1471 }
1472
1473 // Arguments:
1474 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1475 //            (this flag is currently ignored)
1476 // name - stub name string
1477 //
1478 // Inputs:
1479 // c_rarg0 - source array address
1480 // c_rarg1 - destination array address
1481 // c_rarg2 - element count, treated as ssize_t, can be zero
1482 //
1483 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1484 // let the hardware handle it. The two or four words within dwords
1485 // or qwords that span cache line boundaries will still be loaded
1486 // and stored atomically.
1487 //
1488 // Side Effects:
1489 // disjoint_short_copy_entry is set to the no-overlap entry point
1490 // used by generate_conjoint_short_copy().
1491 //
1492 address generate_disjoint_short_copy(bool aligned, const char *name) {
1493 __ align(CodeEntryAlignment);
1494 StubCodeMark mark(this, "StubRoutines", name);
1495 address start = __ pc();
1496
1497 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_copy_2_bytes, L_exit;
1498 const Register from = rdi; // source array address
1499 const Register to = rsi; // destination array address
1500 const Register count = rdx; // elements count
1501 const Register word_count = rcx;
1502 const Register qword_count = count;
1503 const Register end_from = from; // source array end address
1504 const Register end_to = to; // destination array end address
1505 // End pointers are inclusive, and if count is not zero they point
1506 // to the last unit copied: end_to[0] := end_from[0]
1507
1508 __ enter(); // required for proper stackwalking of RuntimeStub frame
1509 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1510
1511 disjoint_short_copy_entry = __ pc();
1512 BLOCK_COMMENT("Entry:");
1513 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1514
1515 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1516 // r9 and r10 may be used to save non-volatile registers
1517
1518 // 'from', 'to' and 'count' are now valid
1519 __ movq(word_count, count);
1520 __ shrq(count, 2); // count => qword_count
1521
1522 // Copy from low to high addresses. Use 'to' as scratch.
1523 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
1524 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
1525 __ negq(qword_count);
1526 __ jmp(L_copy_32_bytes);
1527
1528 // Copy trailing qwords
1529 __ BIND(L_copy_8_bytes);
1530 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1531 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1532 __ incrementq(qword_count);
1533 __ jcc(Assembler::notZero, L_copy_8_bytes);
1534
1535 // Original 'dest' is trashed, so we can't use it as a
1536 // base register for a possible trailing word copy
1537
1538 // Check for and copy trailing dword
1539 __ BIND(L_copy_4_bytes);
1540 __ testq(word_count, 2);
1541 __ jccb(Assembler::zero, L_copy_2_bytes);
1542 __ movl(rax, Address(end_from, 8));
1543 __ movl(Address(end_to, 8), rax);
1544
1545 __ addq(end_from, 4);
1546 __ addq(end_to, 4);
1547
1548 // Check for and copy trailing word
1549 __ BIND(L_copy_2_bytes);
1550 __ testq(word_count, 1);
1551 __ jccb(Assembler::zero, L_exit);
1552 __ movw(rax, Address(end_from, 8));
1553 __ movw(Address(end_to, 8), rax);
1554
1555 __ BIND(L_exit);
1556 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1557 restore_arg_regs();
1558 __ xorq(rax, rax); // return 0
1559 __ leave(); // required for proper stackwalking of RuntimeStub frame
1560 __ ret(0);
1561
1562 // Copy in 32-byte chunks
1563 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1564 __ jmp(L_copy_4_bytes);
1565
1566 return start;
1567 }
1568
1569 // Arguments:
1570 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1571 //            (this flag is currently ignored)
1572 // name - stub name string
1573 //
1574 // Inputs:
1575 // c_rarg0 - source array address
1576 // c_rarg1 - destination array address
1577 // c_rarg2 - element count, treated as ssize_t, can be zero
1578 //
1579 // If 'from' and/or 'to' are aligned on 4- or 2-byte boundaries, we
1580 // let the hardware handle it. The two or four words within dwords
1581 // or qwords that span cache line boundaries will still be loaded
1582 // and stored atomically.
1583 //
1584 address generate_conjoint_short_copy(bool aligned, const char *name) {
1585 __ align(CodeEntryAlignment);
1586 StubCodeMark mark(this, "StubRoutines", name);
1587 address start = __ pc();
1588
1589 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes;
1590 const Register from = rdi; // source array address
1591 const Register to = rsi; // destination array address
1592 const Register count = rdx; // elements count
1593 const Register word_count = rcx;
1594 const Register qword_count = count;
1595
1596 __ enter(); // required for proper stackwalking of RuntimeStub frame
1597 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1598
1599 short_copy_entry = __ pc();
1600 BLOCK_COMMENT("Entry:");
1601 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1602
1603 array_overlap_test(disjoint_short_copy_entry, Address::times_2);
1604 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1605 // r9 and r10 may be used to save non-volatile registers
1606
1607 // 'from', 'to' and 'count' are now valid
1608 __ movq(word_count, count);
1609 __ shrq(count, 2); // count => qword_count
1610
1611 // Copy from high to low addresses. Use 'to' as scratch.
1612
1613 // Check for and copy trailing word
1614 __ testq(word_count, 1);
1615 __ jccb(Assembler::zero, L_copy_4_bytes);
1616 __ movw(rax, Address(from, word_count, Address::times_2, -2));
1617 __ movw(Address(to, word_count, Address::times_2, -2), rax);
1618
1619 // Check for and copy trailing dword
1620 __ BIND(L_copy_4_bytes);
1621 __ testq(word_count, 2);
1622 __ jcc(Assembler::zero, L_copy_32_bytes);
1623 __ movl(rax, Address(from, qword_count, Address::times_8));
1624 __ movl(Address(to, qword_count, Address::times_8), rax);
1625 __ jmp(L_copy_32_bytes);
1626
1627 // Copy trailing qwords
1628 __ BIND(L_copy_8_bytes);
1629 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1630 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1631 __ decrementq(qword_count);
1632 __ jcc(Assembler::notZero, L_copy_8_bytes);
1633
1634 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1635 restore_arg_regs();
1636 __ xorq(rax, rax); // return 0
1637 __ leave(); // required for proper stackwalking of RuntimeStub frame
1638 __ ret(0);
1639
1640 // Copy in 32-byte chunks
1641 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1642
1643 inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
1644 restore_arg_regs();
1645 __ xorq(rax, rax); // return 0
1646 __ leave(); // required for proper stackwalking of RuntimeStub frame
1647 __ ret(0);
1648
1649 return start;
1650 }
1651
1652 // Arguments:
1653 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1654 //            (this flag is currently ignored)
1655 // name - stub name string
1656 //
1657 // Inputs:
1658 // c_rarg0 - source array address
1659 // c_rarg1 - destination array address
1660 // c_rarg2 - element count, treated as ssize_t, can be zero
1661 //
1662 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1663 // the hardware handle it. The two dwords within qwords that span
1664 // cache line boundaries will still be loaded and stored atomically.
1665 //
1666 // Side Effects:
1667 // disjoint_int_copy_entry is set to the no-overlap entry point
1668 // used by generate_conjoint_int_copy().
1669 //
1670 address generate_disjoint_int_copy(bool aligned, const char *name) {
1671 __ align(CodeEntryAlignment);
1672 StubCodeMark mark(this, "StubRoutines", name);
1673 address start = __ pc();
1674
1675 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
1676 const Register from = rdi; // source array address
1677 const Register to = rsi; // destination array address
1678 const Register count = rdx; // elements count
1679 const Register dword_count = rcx;
1680 const Register qword_count = count;
1681 const Register end_from = from; // source array end address
1682 const Register end_to = to; // destination array end address
1683 // End pointers are inclusive, and if count is not zero they point
1684 // to the last unit copied: end_to[0] := end_from[0]
1685
1686 __ enter(); // required for proper stackwalking of RuntimeStub frame
1687 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1688
1689 disjoint_int_copy_entry = __ pc();
1690 BLOCK_COMMENT("Entry:");
1691 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1692
1693 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1694 // r9 and r10 may be used to save non-volatile registers
1695
1696 // 'from', 'to' and 'count' are now valid
1697 __ movq(dword_count, count);
1698 __ shrq(count, 1); // count => qword_count
1699
1700 // Copy from low to high addresses. Use 'to' as scratch.
1701 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
1702 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
1703 __ negq(qword_count);
1704 __ jmp(L_copy_32_bytes);
1705
1706 // Copy trailing qwords
1707 __ BIND(L_copy_8_bytes);
1708 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1709 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1710 __ incrementq(qword_count);
1711 __ jcc(Assembler::notZero, L_copy_8_bytes);
1712
1713 // Check for and copy trailing dword
1714 __ BIND(L_copy_4_bytes);
1715 __ testq(dword_count, 1); // Only byte test since the value is 0 or 1
1716 __ jccb(Assembler::zero, L_exit);
1717 __ movl(rax, Address(end_from, 8));
1718 __ movl(Address(end_to, 8), rax);
1719
1720 __ BIND(L_exit);
1721 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1722 restore_arg_regs();
1723 __ xorq(rax, rax); // return 0
1724 __ leave(); // required for proper stackwalking of RuntimeStub frame
1725 __ ret(0);
1726
1727 // Copy 32-byte chunks
1728 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1729 __ jmp(L_copy_4_bytes);
1730
1731 return start;
1732 }
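// For reference, a C-level sketch of the disjoint int copy above (an
// editorial illustration, not generated code): qwords are copied forward
// through inclusive end pointers using a negative index that counts up to
// zero, then at most one trailing dword remains.
//
//   void disjoint_int_copy_sketch(jint* from, jint* to, size_t count) {
//     ssize_t q        = count / 2;                 // whole qwords
//     jlong*  end_from = (jlong*) from + q - 1;     // inclusive end pointers
//     jlong*  end_to   = (jlong*) to   + q - 1;
//     for (ssize_t i = 1 - q; i <= 0; i++)          // index counts up to zero
//       end_to[i] = end_from[i];
//     if (count & 1)                                // trailing dword, if any
//       to[count - 1] = from[count - 1];
//   }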
1733
1734 // Arguments:
1735 // aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
1736 // ignored
1737 // name - stub name string
1738 //
1739 // Inputs:
1740 // c_rarg0 - source array address
1741 // c_rarg1 - destination array address
1742 // c_rarg2 - element count, treated as ssize_t, can be zero
1743 //
1744 // If 'from' and/or 'to' are aligned on 4-byte boundaries, we let
1745 // the hardware handle it. The two dwords within qwords that span
1746 // cache line boundaries will still be loaded and stored atomically.
1747 //
1748 address generate_conjoint_int_copy(bool aligned, const char *name) {
1749 __ align(CodeEntryAlignment);
1750 StubCodeMark mark(this, "StubRoutines", name);
1751 address start = __ pc();
1752
1753 Label L_copy_32_bytes, L_copy_8_bytes, L_copy_2_bytes;
1754 const Register from = rdi; // source array address
1755 const Register to = rsi; // destination array address
1756 const Register count = rdx; // elements count
1757 const Register dword_count = rcx;
1758 const Register qword_count = count;
1759
1760 __ enter(); // required for proper stackwalking of RuntimeStub frame
1761 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1762
1763 int_copy_entry = __ pc();
1764 BLOCK_COMMENT("Entry:");
1765 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1766
1767 array_overlap_test(disjoint_int_copy_entry, Address::times_4);
1768 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1769 // r9 and r10 may be used to save non-volatile registers
1770
1771 // 'from', 'to' and 'count' are now valid
1772 __ movq(dword_count, count);
1773 __ shrq(count, 1); // count => qword_count
1774
1775 // Copy from high to low addresses. Use 'to' as scratch.
1776
1777 // Check for and copy trailing dword
1778 __ testq(dword_count, 1);
1779 __ jcc(Assembler::zero, L_copy_32_bytes);
1780 __ movl(rax, Address(from, dword_count, Address::times_4, -4));
1781 __ movl(Address(to, dword_count, Address::times_4, -4), rax);
1782 __ jmp(L_copy_32_bytes);
1783
1784 // Copy trailing qwords
1785 __ BIND(L_copy_8_bytes);
1786 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1787 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1788 __ decrementq(qword_count);
1789 __ jcc(Assembler::notZero, L_copy_8_bytes);
1790
1791 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1792 restore_arg_regs();
1793 __ xorq(rax, rax); // return 0
1794 __ leave(); // required for proper stackwalking of RuntimeStub frame
1795 __ ret(0);
1796
1797 // Copy in 32-byte chunks
1798 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1799
1800 inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
1801 restore_arg_regs();
1802 __ xorq(rax, rax); // return 0
1803 __ leave(); // required for proper stackwalking of RuntimeStub frame
1804 __ ret(0);
1805
1806 return start;
1807 }
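// The conjoint variant sketches out symmetrically (again an editorial
// illustration, not generated code): any trailing dword is copied first,
// then qwords are copied from high to low so overlapping ranges stay safe.
//
//   void conjoint_int_copy_sketch(jint* from, jint* to, size_t count) {
//     ssize_t q = count / 2;
//     if (count & 1)                               // trailing dword first
//       to[count - 1] = from[count - 1];
//     for (ssize_t i = q - 1; i >= 0; i--)         // qwords, high to low
//       ((jlong*) to)[i] = ((jlong*) from)[i];
//   }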
1808
1809 // Arguments:
1810 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1811 // ignored
1812 // is_oop - true => oop array, so generate store check code
1813 // name - stub name string
1814 //
1815 // Inputs:
1816 // c_rarg0 - source array address
1817 // c_rarg1 - destination array address
1818 // c_rarg2 - element count, treated as ssize_t, can be zero
1819 //
1820 // Side Effects:
1821 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
1822 // no-overlap entry point used by generate_conjoint_long_oop_copy().
1823 //
1824 address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
1825 __ align(CodeEntryAlignment);
1826 StubCodeMark mark(this, "StubRoutines", name);
1827 address start = __ pc();
1828
1829 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
1830 const Register from = rdi; // source array address
1831 const Register to = rsi; // destination array address
1832 const Register qword_count = rdx; // elements count
1833 const Register end_from = from; // source array end address
1834 const Register end_to = rcx; // destination array end address
1835 const Register saved_to = to;
1836 // End pointers are inclusive, and if count is not zero they point
1837 // to the last unit copied: end_to[0] := end_from[0]
1838
1839 __ enter(); // required for proper stackwalking of RuntimeStub frame
1840 // Save no-overlap entry point for generate_conjoint_long_oop_copy()
1841 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1842
1843 if (is_oop) {
1844 disjoint_oop_copy_entry = __ pc();
1845 // no registers are destroyed by this call
1846 gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2);
1847 } else {
1848 disjoint_long_copy_entry = __ pc();
1849 }
1850 BLOCK_COMMENT("Entry:");
1851 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1852
1853 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1854 // r9 and r10 may be used to save non-volatile registers
1855
1856 // 'from', 'to' and 'qword_count' are now valid
1857
1858 // Copy from low to high addresses. Use 'to' as scratch.
1859 __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
1860 __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
1861 __ negq(qword_count);
1862 __ jmp(L_copy_32_bytes);
1863
1864 // Copy trailing qwords
1865 __ BIND(L_copy_8_bytes);
1866 __ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
1867 __ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
1868 __ incrementq(qword_count);
1869 __ jcc(Assembler::notZero, L_copy_8_bytes);
1870
1871 if (is_oop) {
1872 __ jmp(L_exit);
1873 } else {
1874 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1875 restore_arg_regs();
1876 __ xorq(rax, rax); // return 0
1877 __ leave(); // required for proper stackwalking of RuntimeStub frame
1878 __ ret(0);
1879 }
1880
1881 // Copy in 32-byte chunks
1882 copy_32_bytes_forward(end_from, end_to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1883
1884 if (is_oop) {
1885 __ BIND(L_exit);
1886 gen_write_ref_array_post_barrier(saved_to, end_to, rax);
1887 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
1888 } else {
1889 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1890 }
1891 restore_arg_regs();
1892 __ xorq(rax, rax); // return 0
1893 __ leave(); // required for proper stackwalking of RuntimeStub frame
1894 __ ret(0);
1895
1896 return start;
1897 }
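// Editorial sketch of the structure above for the oop case (the barrier
// helpers are collector-specific and may expand to nothing):
//
//   pre_barrier(dest, count);        // emitted before any oop store
//   copy qwords from low to high addresses;
//   post_barrier(to, end_to);        // card-mark the stored range on success
//
// The non-oop (jlong) path skips both barriers and returns directly.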
1898
1899 // Arguments:
1900 // aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
1901 // ignored
1902 // is_oop - true => oop array, so generate store check code
1903 // name - stub name string
1904 //
1905 // Inputs:
1906 // c_rarg0 - source array address
1907 // c_rarg1 - destination array address
1908 // c_rarg2 - element count, treated as ssize_t, can be zero
1909 //
1910 address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) {
1911 __ align(CodeEntryAlignment);
1912 StubCodeMark mark(this, "StubRoutines", name);
1913 address start = __ pc();
1914
1915 Label L_copy_32_bytes, L_copy_8_bytes, L_exit;
1916 const Register from = rdi; // source array address
1917 const Register to = rsi; // destination array address
1918 const Register qword_count = rdx; // elements count
1919 const Register saved_count = rcx;
1920
1921 __ enter(); // required for proper stackwalking of RuntimeStub frame
1922 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int.
1923
1924 address disjoint_copy_entry = NULL;
1925 if (is_oop) {
1926 disjoint_copy_entry = disjoint_oop_copy_entry;
1927 oop_copy_entry = __ pc();
1928 } else {
1929 disjoint_copy_entry = disjoint_long_copy_entry;
1930 long_copy_entry = __ pc();
1931 }
1932 BLOCK_COMMENT("Entry:");
1933 // caller can pass a 64-bit byte count here (from Unsafe.copyMemory)
1934
1935 array_overlap_test(disjoint_copy_entry, Address::times_8);
1936 setup_arg_regs(); // from => rdi, to => rsi, count => rdx
1937 // r9 and r10 may be used to save non-volatile registers
1938
1939 // 'from', 'to' and 'qword_count' are now valid
1940
1941 if (is_oop) {
1942 // Save to and count for store barrier
1943 __ movq(saved_count, qword_count);
1944 // No registers are destroyed by this call
1945 gen_write_ref_array_pre_barrier(to, saved_count);
1946 }
1947
1948 // Copy from high to low addresses. Use rcx as scratch.
1949
1950 __ jmp(L_copy_32_bytes);
1951
1952 // Copy trailing qwords
1953 __ BIND(L_copy_8_bytes);
1954 __ movq(rax, Address(from, qword_count, Address::times_8, -8));
1955 __ movq(Address(to, qword_count, Address::times_8, -8), rax);
1956 __ decrementq(qword_count);
1957 __ jcc(Assembler::notZero, L_copy_8_bytes);
1958
1959 if (is_oop) {
1960 __ jmp(L_exit);
1961 } else {
1962 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1963 restore_arg_regs();
1964 __ xorq(rax, rax); // return 0
1965 __ leave(); // required for proper stackwalking of RuntimeStub frame
1966 __ ret(0);
1967 }
1968
1969 // Copy in 32-byte chunks
1970 copy_32_bytes_backward(from, to, qword_count, rax, L_copy_32_bytes, L_copy_8_bytes);
1971
1972 if (is_oop) {
1973 __ BIND(L_exit);
1974 __ leaq(rcx, Address(to, saved_count, Address::times_8, -8));
1975 gen_write_ref_array_post_barrier(to, rcx, rax);
1976 inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
1977 } else {
1978 inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
1979 }
1980 restore_arg_regs();
1981 __ xorq(rax, rax); // return 0
1982 __ leave(); // required for proper stackwalking of RuntimeStub frame
1983 __ ret(0);
1984
1985 return start;
1986 }
1987
1988
1989 // Helper for generating a dynamic type check.
1990 // Smashes no registers.
1991 void generate_type_check(Register sub_klass,
1992 Register super_check_offset,
1993 Register super_klass,
1994 Label& L_success) {
1995 assert_different_registers(sub_klass, super_check_offset, super_klass);
1996
1997 BLOCK_COMMENT("type_check:");
1998
1999 Label L_miss;
2000
2001 // a couple of useful fields in sub_klass:
2002 int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
2003 Klass::secondary_supers_offset_in_bytes());
2004 int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
2005 Klass::secondary_super_cache_offset_in_bytes());
2006 Address secondary_supers_addr(sub_klass, ss_offset);
2007 Address super_cache_addr( sub_klass, sc_offset);
2008
2009 // if the pointers are equal, we are done (e.g., String[] elements)
2010 __ cmpq(super_klass, sub_klass);
2011 __ jcc(Assembler::equal, L_success);
2012
2013 // check the supertype display:
2014 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
2015 __ cmpq(super_klass, super_check_addr); // test the super type
2016 __ jcc(Assembler::equal, L_success);
2017
2018 // if it was a primary super, we can just fail immediately
2019 __ cmpl(super_check_offset, sc_offset);
2020 __ jcc(Assembler::notEqual, L_miss);
2021
2022 // Now do a linear scan of the secondary super-klass chain.
2023 // The repne_scan instruction uses fixed registers, which we must spill.
2024 // (We need a couple more temps in any case.)
2025 // This code is rarely used, so simplicity is a virtue here.
2026 inc_counter_np(SharedRuntime::_partial_subtype_ctr);
2027 {
2028 __ pushq(rax);
2029 __ pushq(rcx);
2030 __ pushq(rdi);
2031 assert_different_registers(sub_klass, super_klass, rax, rcx, rdi);
2032
2033 __ movq(rdi, secondary_supers_addr);
2034 // Load the array length.
2035 __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
2036 // Skip to start of data.
2037 __ addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
2038 // Scan rcx words at [rdi] for occurrence of rax
2039 // Set NZ/Z based on last compare
2040 __ movq(rax, super_klass);
2041 __ repne_scan();
2042
2043 // Unspill the temp. registers:
2044 __ popq(rdi);
2045 __ popq(rcx);
2046 __ popq(rax);
2047
2048 __ jcc(Assembler::notEqual, L_miss);
2049 }
2050
2051 // Success. Cache the super we found and proceed in triumph.
2052 __ movq(super_cache_addr, super_klass); // note: rax is dead
2053 __ jmp(L_success);
2054
2055 // Fall through on failure!
2056 __ BIND(L_miss);
2057 }
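// The check above is, in C-level pseudocode (names approximate; an
// editorial sketch of the logic rather than HotSpot API):
//
//   bool check(Klass* sub, int super_check_offset, Klass* super) {
//     if (sub == super) return true;                     // exact match
//     if (*(Klass**)((address) sub + super_check_offset) == super)
//       return true;                                     // display/cache hit
//     if (super_check_offset != sc_offset) return false; // primary miss is final
//     for (each s in sub's secondary supers array)       // rare slow path
//       if (s == super) { cache s in sub's secondary_super_cache; return true; }
//     return false;
//   }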
2058
2059 //
2060 // Generate checkcasting array copy stub
2061 //
2062 // Input:
2063 // c_rarg0 - source array address
2064 // c_rarg1 - destination array address
2065 // c_rarg2 - element count, treated as ssize_t, can be zero
2066 // c_rarg3 - size_t ckoff (super_check_offset)
2067 // not Win64
2068 // c_rarg4 - oop ckval (super_klass)
2069 // Win64
2070 // rsp+40 - oop ckval (super_klass)
2071 //
2072 // Output:
2073 // rax == 0 - success
2074 // rax == -1^K - failure, where K is partial transfer count
2075 //
2076 address generate_checkcast_copy(const char *name) {
2077
2078 Label L_load_element, L_store_element, L_do_card_marks, L_done;
2079
2080 // Input registers (after setup_arg_regs)
2081 const Register from = rdi; // source array address
2082 const Register to = rsi; // destination array address
2083 const Register length = rdx; // elements count
2084 const Register ckoff = rcx; // super_check_offset
2085 const Register ckval = r8; // super_klass
2086
2087 // Registers used as temps (r13, r14 are save-on-entry)
2088 const Register end_from = from; // source array end address
2089 const Register end_to = r13; // destination array end address
2090 const Register count = rdx; // -(count_remaining)
2091 const Register r14_length = r14; // saved copy of length
2092 // End pointers are inclusive, and if length is not zero they point
2093 // to the last unit copied: end_to[0] := end_from[0]
2094
2095 const Register rax_oop = rax; // actual oop copied
2096 const Register r11_klass = r11; // oop._klass
2097
2098 //---------------------------------------------------------------
2099 // Assembler stub will be used for this call to arraycopy
2100 // if the two arrays are subtypes of Object[] but the
2101 // destination array type is not equal to or a supertype
2102 // of the source type. Each element must be separately
2103 // checked.
2104
2105 __ align(CodeEntryAlignment);
2106 StubCodeMark mark(this, "StubRoutines", name);
2107 address start = __ pc();
2108
2109 __ enter(); // required for proper stackwalking of RuntimeStub frame
2110
2111 checkcast_copy_entry = __ pc();
2112 BLOCK_COMMENT("Entry:");
2113
2114 #ifdef ASSERT
2115 // caller guarantees that the arrays really are different
2116 // otherwise, we would have to make conjoint checks
2117 { Label L;
2118 array_overlap_test(L, Address::times_8);
2119 __ stop("checkcast_copy within a single array");
2120 __ bind(L);
2121 }
2122 #endif //ASSERT
2123
2124 // allocate spill slots for r13, r14
2125 enum {
2126 saved_r13_offset,
2127 saved_r14_offset,
2128 saved_rbp_offset,
2129 saved_rip_offset,
2130 saved_rarg0_offset
2131 };
2132 __ subq(rsp, saved_rbp_offset * wordSize);
2133 __ movq(Address(rsp, saved_r13_offset * wordSize), r13);
2134 __ movq(Address(rsp, saved_r14_offset * wordSize), r14);
2135 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
2136 // ckoff => rcx, ckval => r8
2137 // r9 and r10 may be used to save non-volatile registers
2138 #ifdef _WIN64
2139 // last argument (#4) is on stack on Win64
2140 const int ckval_offset = saved_rarg0_offset + 4;
2141 __ movq(ckval, Address(rsp, ckval_offset * wordSize));
2142 #endif
2143
2144 // check that int operands are properly extended to size_t
2145 assert_clean_int(length, rax);
2146 assert_clean_int(ckoff, rax);
2147
2148 #ifdef ASSERT
2149 BLOCK_COMMENT("assert consistent ckoff/ckval");
2150 // The ckoff and ckval must be mutually consistent,
2151 // even though caller generates both.
2152 { Label L;
2153 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2154 Klass::super_check_offset_offset_in_bytes());
2155 __ cmpl(ckoff, Address(ckval, sco_offset));
2156 __ jcc(Assembler::equal, L);
2157 __ stop("super_check_offset inconsistent");
2158 __ bind(L);
2159 }
2160 #endif //ASSERT
2161
2162 // Loop-invariant addresses. They are exclusive end pointers.
2163 Address end_from_addr(from, length, Address::times_8, 0);
2164 Address end_to_addr(to, length, Address::times_8, 0);
2165 // Loop-variant addresses. They assume post-incremented count < 0.
2166 Address from_element_addr(end_from, count, Address::times_8, 0);
2167 Address to_element_addr(end_to, count, Address::times_8, 0);
2168 Address oop_klass_addr(rax_oop, oopDesc::klass_offset_in_bytes());
2169
2170 gen_write_ref_array_pre_barrier(to, count);
2171
2172 // Copy from low to high addresses, indexed from the end of each array.
2173 __ leaq(end_from, end_from_addr);
2174 __ leaq(end_to, end_to_addr);
2175 __ movq(r14_length, length); // save a copy of the length
2176 assert(length == count, ""); // else fix next line:
2177 __ negq(count); // negate and test the length
2178 __ jcc(Assembler::notZero, L_load_element);
2179
2180 // Empty array: Nothing to do.
2181 __ xorq(rax, rax); // return 0 on (trivial) success
2182 __ jmp(L_done);
2183
2184 // ======== begin loop ========
2185 // (Loop is rotated; its entry is L_load_element.)
2186 // Loop control:
2187 // for (count = -count; count != 0; count++)
2188 // Base pointers src, dst are biased by 8*(count-1), to the last element.
2189 __ align(16);
2190
2191 __ BIND(L_store_element);
2192 __ movq(to_element_addr, rax_oop); // store the oop
2193 __ incrementq(count); // increment the count toward zero
2194 __ jcc(Assembler::zero, L_do_card_marks);
2195
2196 // ======== loop entry is here ========
2197 __ BIND(L_load_element);
2198 __ movq(rax_oop, from_element_addr); // load the oop
2199 __ testq(rax_oop, rax_oop);
2200 __ jcc(Assembler::zero, L_store_element);
2201
2202 __ movq(r11_klass, oop_klass_addr); // query the object klass
2203 generate_type_check(r11_klass, ckoff, ckval, L_store_element);
2204 // ======== end loop ========
2205
2206 // It was a real error; we must depend on the caller to finish the job.
2207 // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
2208 // Emit GC store barriers for the oops we have copied (r14 + rdx),
2209 // and report their number to the caller.
2210 assert_different_registers(rax, r14_length, count, to, end_to, rcx);
2211 __ leaq(end_to, to_element_addr);
2212 gen_write_ref_array_post_barrier(to, end_to, rcx);
2213 __ movq(rax, r14_length); // original oops
2214 __ addq(rax, count); // K = (original - remaining) oops
2215 __ notq(rax); // report (-1^K) to caller
2216 __ jmp(L_done);
2217
2218 // Come here on success only.
2219 __ BIND(L_do_card_marks);
2220 __ addq(end_to, -wordSize); // make an inclusive end pointer
2221 gen_write_ref_array_post_barrier(to, end_to, rcx);
2222 __ xorq(rax, rax); // return 0 on success
2223
2224 // Common exit point (success or failure).
2225 __ BIND(L_done);
2226 __ movq(r13, Address(rsp, saved_r13_offset * wordSize));
2227 __ movq(r14, Address(rsp, saved_r14_offset * wordSize));
2228 inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
2229 restore_arg_regs();
2230 __ leave(); // required for proper stackwalking of RuntimeStub frame
2231 __ ret(0);
2232
2233 return start;
2234 }
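// C-level sketch of the loop just generated (editorial illustration of the
// return convention, not generated code; 'check' is the type-check sketch
// above):
//
//   ssize_t checkcast_copy_sketch(oop* from, oop* to, size_t length,
//                                 int ckoff, Klass* ckval) {
//     for (size_t i = 0; i < length; i++) {
//       oop elem = from[i];              // NULLs are stored unchecked
//       if (elem != NULL && !check(elem->klass(), ckoff, ckval))
//         return ~(ssize_t) i;           // -1^K, K elements were copied
//       to[i] = elem;
//     }
//     return 0;                          // success
//   }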
2235
2236 //
2237 // Generate 'unsafe' array copy stub
2238 // Though just as safe as the other stubs, it takes an unscaled
2239 // size_t argument instead of an element count.
2240 //
2241 // Input:
2242 // c_rarg0 - source array address
2243 // c_rarg1 - destination array address
2244 // c_rarg2 - byte count, treated as ssize_t, can be zero
2245 //
2246 // Examines the alignment of the operands and dispatches
2247 // to a long, int, short, or byte copy loop.
2248 //
2249 address generate_unsafe_copy(const char *name) {
2250
2251 Label L_long_aligned, L_int_aligned, L_short_aligned;
2252
2253 // Input registers (before setup_arg_regs)
2254 const Register from = c_rarg0; // source array address
2255 const Register to = c_rarg1; // destination array address
2256 const Register size = c_rarg2; // byte count (size_t)
2257
2258 // Register used as a temp
2259 const Register bits = rax; // test copy of low bits
2260
2261 __ align(CodeEntryAlignment);
2262 StubCodeMark mark(this, "StubRoutines", name);
2263 address start = __ pc();
2264
2265 __ enter(); // required for proper stackwalking of RuntimeStub frame
2266
2267 // bump this on entry, not on exit:
2268 inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
2269
2270 __ movq(bits, from);
2271 __ orq(bits, to);
2272 __ orq(bits, size);
2273
2274 __ testb(bits, BytesPerLong-1);
2275 __ jccb(Assembler::zero, L_long_aligned);
2276
2277 __ testb(bits, BytesPerInt-1);
2278 __ jccb(Assembler::zero, L_int_aligned);
2279
2280 __ testb(bits, BytesPerShort-1);
2281 __ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
2282
2283 __ BIND(L_short_aligned);
2284 __ shrq(size, LogBytesPerShort); // size => short_count
2285 __ jump(RuntimeAddress(short_copy_entry));
2286
2287 __ BIND(L_int_aligned);
2288 __ shrq(size, LogBytesPerInt); // size => int_count
2289 __ jump(RuntimeAddress(int_copy_entry));
2290
2291 __ BIND(L_long_aligned);
2292 __ shrq(size, LogBytesPerLong); // size => qword_count
2293 __ jump(RuntimeAddress(long_copy_entry));
2294
2295 return start;
2296 }
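// The alignment dispatch above reduces to (editorial sketch):
//
//   intptr_t bits = from | to | size;
//   if      ((bits & 7) == 0) long_copy (from, to, size >> 3);
//   else if ((bits & 3) == 0) int_copy  (from, to, size >> 2);
//   else if ((bits & 1) == 0) short_copy(from, to, size >> 1);
//   else                      byte_copy (from, to, size);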
2297
2298 // Perform range checks on the proposed arraycopy.
2299 // Kills temp, but nothing else.
2300 // Also, cleans up the high 32 bits of src_pos and dst_pos.
2301 void arraycopy_range_checks(Register src, // source array oop (c_rarg0)
2302 Register src_pos, // source position (c_rarg1)
2303 Register dst, // destination array oop (c_rarg2)
2304 Register dst_pos, // destination position (c_rarg3)
2305 Register length,
2306 Register temp,
2307 Label& L_failed) {
2308 BLOCK_COMMENT("arraycopy_range_checks:");
2309
2310 // if (src_pos + length > arrayOop(src)->length()) FAIL;
2311 __ movl(temp, length);
2312 __ addl(temp, src_pos); // src_pos + length
2313 __ cmpl(temp, Address(src, arrayOopDesc::length_offset_in_bytes()));
2314 __ jcc(Assembler::above, L_failed);
2315
2316 // if (dst_pos + length > arrayOop(dst)->length()) FAIL;
2317 __ movl(temp, length);
2318 __ addl(temp, dst_pos); // dst_pos + length
2319 __ cmpl(temp, Address(dst, arrayOopDesc::length_offset_in_bytes()));
2320 __ jcc(Assembler::above, L_failed);
2321
2322 // We have to clean up the high 32 bits of 'src_pos' and 'dst_pos'.
2323 // A move with sign extension can be used since both are positive.
2324 __ movslq(src_pos, src_pos);
2325 __ movslq(dst_pos, dst_pos);
2326
2327 BLOCK_COMMENT("arraycopy_range_checks done");
2328 }
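// Because the jcc(Assembler::above) tests are unsigned, each check is
// equivalent to (editorial sketch; a negative sum wraps to a huge unsigned
// value and fails as well):
//
//   if ((juint)(src_pos + length) > (juint) src->length()) goto L_failed;
//   if ((juint)(dst_pos + length) > (juint) dst->length()) goto L_failed;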
2329
2330 //
2331 // Generate generic array copy stubs
2332 //
2333 // Input:
2334 // c_rarg0 - src oop
2335 // c_rarg1 - src_pos (32-bits)
2336 // c_rarg2 - dst oop
2337 // c_rarg3 - dst_pos (32-bits)
2338 // not Win64
2339 // c_rarg4 - element count (32-bits)
2340 // Win64
2341 // rsp+40 - element count (32-bits)
2342 //
2343 // Output:
2344 // rax == 0 - success
2345 // rax == -1^K - failure, where K is partial transfer count
2346 //
2347 address generate_generic_copy(const char *name) {
2348
2349 Label L_failed, L_failed_0, L_objArray;
2350 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs;
2351
2352 // Input registers
2353 const Register src = c_rarg0; // source array oop
2354 const Register src_pos = c_rarg1; // source position
2355 const Register dst = c_rarg2; // destination array oop
2356 const Register dst_pos = c_rarg3; // destination position
2357 // elements count is on stack on Win64
2358 #ifdef _WIN64
2359 #define C_RARG4 Address(rsp, 6 * wordSize)
2360 #else
2361 #define C_RARG4 c_rarg4
2362 #endif
2363
2364 { int modulus = CodeEntryAlignment;
2365 int target = modulus - 5; // 5 = sizeof jmp(L_failed)
2366 int advance = target - (__ offset() % modulus);
2367 if (advance < 0) advance += modulus;
2368 if (advance > 0) __ nop(advance);
2369 }
2370 StubCodeMark mark(this, "StubRoutines", name);
2371
2372 // Short-hop target to L_failed. Makes for denser prologue code.
2373 __ BIND(L_failed_0);
2374 __ jmp(L_failed);
2375 assert(__ offset() % CodeEntryAlignment == 0, "no further alignment needed");
2376
2377 __ align(CodeEntryAlignment);
2378 address start = __ pc();
2379
2380 __ enter(); // required for proper stackwalking of RuntimeStub frame
2381
2382 // bump this on entry, not on exit:
2383 inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
2384
2385 //-----------------------------------------------------------------------
2386 // Assembler stub will be used for this call to arraycopy
2387 // if the following conditions are met:
2388 //
2389 // (1) src and dst must not be null.
2390 // (2) src_pos must not be negative.
2391 // (3) dst_pos must not be negative.
2392 // (4) length must not be negative.
2393 // (5) src klass and dst klass should be the same and not NULL.
2394 // (6) src and dst should be arrays.
2395 // (7) src_pos + length must not exceed length of src.
2396 // (8) dst_pos + length must not exceed length of dst.
2397 //
2398
2399 // if (src == NULL) return -1;
2400 __ testq(src, src); // src oop
2401 size_t j1off = __ offset();
2402 __ jccb(Assembler::zero, L_failed_0);
2403
2404 // if (src_pos < 0) return -1;
2405 __ testl(src_pos, src_pos); // src_pos (32-bits)
2406 __ jccb(Assembler::negative, L_failed_0);
2407
2408 // if (dst == NULL) return -1;
2409 __ testq(dst, dst); // dst oop
2410 __ jccb(Assembler::zero, L_failed_0);
2411
2412 // if (dst_pos < 0) return -1;
2413 __ testl(dst_pos, dst_pos); // dst_pos (32-bits)
2414 size_t j4off = __ offset();
2415 __ jccb(Assembler::negative, L_failed_0);
2416
2417 // The first four tests are very dense code,
2418 // but not quite dense enough to put four
2419 // jumps in a 16-byte instruction fetch buffer.
2420 // That's good, because some branch predicters
2421 // do not like jumps so close together.
2422 // Make sure of this.
2423 guarantee(((j1off ^ j4off) & ~15) != 0, "I$ line of 1st & 4th jumps");
2424
2425 // registers used as temp
2426 const Register r11_length = r11; // elements count to copy
2427 const Register r10_src_klass = r10; // array klass
2428
2429 // if (length < 0) return -1;
2430 __ movl(r11_length, C_RARG4); // length (elements count, 32-bits value)
2431 __ testl(r11_length, r11_length);
2432 __ jccb(Assembler::negative, L_failed_0);
2433
2434 Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
2435 Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
2436 __ movq(r10_src_klass, src_klass_addr);
2437 #ifdef ASSERT
2438 // assert(src->klass() != NULL);
2439 BLOCK_COMMENT("assert klasses not null");
2440 { Label L1, L2;
2441 __ testq(r10_src_klass, r10_src_klass);
2442 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
2443 __ bind(L1);
2444 __ stop("broken null klass");
2445 __ bind(L2);
2446 __ cmpq(dst_klass_addr, 0);
2447 __ jcc(Assembler::equal, L1); // this would be broken also
2448 BLOCK_COMMENT("assert done");
2449 }
2450 #endif
2451
2452 // Load layout helper (32-bits)
2453 //
2454 //  |array_tag|     | header_size | element_type |     |log2_element_size|
2455 // 32        30    24            16              8     2                 0
2456 //
2457 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2458 //
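// The decode below extracts these fields with the Klass::_lh_* constants,
// e.g. (editorial sketch):
//
//   int hsize      = (lh >> _lh_header_size_shift) & _lh_header_size_mask;
//   int log2_esize =  lh & _lh_log2_element_size_mask;   // 2 for a jint[]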
2459
2460 int lh_offset = klassOopDesc::header_size() * HeapWordSize +
2461 Klass::layout_helper_offset_in_bytes();
2462
2463 const Register rax_lh = rax; // layout helper
2464
2465 __ movl(rax_lh, Address(r10_src_klass, lh_offset));
2466
2467 // Handle objArrays completely differently...
2468 jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2469 __ cmpl(rax_lh, objArray_lh);
2470 __ jcc(Assembler::equal, L_objArray);
2471
2472 // if (src->klass() != dst->klass()) return -1;
2473 __ cmpq(r10_src_klass, dst_klass_addr);
2474 __ jcc(Assembler::notEqual, L_failed);
2475
2476 // if (!src->is_Array()) return -1;
2477 __ cmpl(rax_lh, Klass::_lh_neutral_value);
2478 __ jcc(Assembler::greaterEqual, L_failed);
2479
2480 // At this point, it is known to be a typeArray (array_tag 0x3).
2481 #ifdef ASSERT
2482 { Label L;
2483 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
2484 __ jcc(Assembler::greaterEqual, L);
2485 __ stop("must be a primitive array");
2486 __ bind(L);
2487 }
2488 #endif
2489
2490 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2491 r10, L_failed);
2492
2493 // typeArrayKlass
2494 //
2495 // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2496 // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2497 //
2498
2499 const Register r10_offset = r10; // array offset
2500 const Register rax_elsize = rax_lh; // element size
2501
2502 __ movl(r10_offset, rax_lh);
2503 __ shrl(r10_offset, Klass::_lh_header_size_shift);
2504 __ andq(r10_offset, Klass::_lh_header_size_mask); // array_offset
2505 __ addq(src, r10_offset); // src array offset
2506 __ addq(dst, r10_offset); // dst array offset
2507 BLOCK_COMMENT("choose copy loop based on element size");
2508 __ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
2509
2510 // next registers should be set before the jump to corresponding stub
2511 const Register from = c_rarg0; // source array address
2512 const Register to = c_rarg1; // destination array address
2513 const Register count = c_rarg2; // elements count
2514
2515 // 'from', 'to' and 'count' must be set in this order,
2516 // since they occupy the same registers as 'src', 'src_pos' and 'dst'.
2517
2518 __ BIND(L_copy_bytes);
2519 __ cmpl(rax_elsize, 0);
2520 __ jccb(Assembler::notEqual, L_copy_shorts);
2521 __ leaq(from, Address(src, src_pos, Address::times_1, 0));// src_addr
2522 __ leaq(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
2523 __ movslq(count, r11_length); // length
2524 __ jump(RuntimeAddress(byte_copy_entry));
2525
2526 __ BIND(L_copy_shorts);
2527 __ cmpl(rax_elsize, LogBytesPerShort);
2528 __ jccb(Assembler::notEqual, L_copy_ints);
2529 __ leaq(from, Address(src, src_pos, Address::times_2, 0));// src_addr
2530 __ leaq(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
2531 __ movslq(count, r11_length); // length
2532 __ jump(RuntimeAddress(short_copy_entry));
2533
2534 __ BIND(L_copy_ints);
2535 __ cmpl(rax_elsize, LogBytesPerInt);
2536 __ jccb(Assembler::notEqual, L_copy_longs);
2537 __ leaq(from, Address(src, src_pos, Address::times_4, 0));// src_addr
2538 __ leaq(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
2539 __ movslq(count, r11_length); // length
2540 __ jump(RuntimeAddress(int_copy_entry));
2541
2542 __ BIND(L_copy_longs);
2543 #ifdef ASSERT
2544 { Label L;
2545 __ cmpl(rax_elsize, LogBytesPerLong);
2546 __ jcc(Assembler::equal, L);
2547 __ stop("must be long copy, but elsize is wrong");
2548 __ bind(L);
2549 }
2550 #endif
2551 __ leaq(from, Address(src, src_pos, Address::times_8, 0));// src_addr
2552 __ leaq(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
2553 __ movslq(count, r11_length); // length
2554 __ jump(RuntimeAddress(long_copy_entry));
2555
2556 // objArrayKlass
2557 __ BIND(L_objArray);
2558 // live at this point: r10_src_klass, src[_pos], dst[_pos]
2559
2560 Label L_plain_copy, L_checkcast_copy;
2561 // test array classes for subtyping
2562 __ cmpq(r10_src_klass, dst_klass_addr); // usual case is exact equality
2563 __ jcc(Assembler::notEqual, L_checkcast_copy);
2564
2565 // Identically typed arrays can be copied without element-wise checks.
2566 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2567 r10, L_failed);
2568
2569 __ leaq(from, Address(src, src_pos, Address::times_8,
2570 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
2571 __ leaq(to, Address(dst, dst_pos, Address::times_8,
2572 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
2573 __ movslq(count, r11_length); // length
2574 __ BIND(L_plain_copy);
2575 __ jump(RuntimeAddress(oop_copy_entry));
2576
2577 __ BIND(L_checkcast_copy);
2578 // live at this point: r10_src_klass, !r11_length
2579 {
2580 // assert(r11_length == C_RARG4); // will reload from here
2581 Register r11_dst_klass = r11;
2582 __ movq(r11_dst_klass, dst_klass_addr);
2583
2584 // Before looking at dst.length, make sure dst is also an objArray.
2585 __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
2586 __ jcc(Assembler::notEqual, L_failed);
2587
2588 // It is safe to examine both src.length and dst.length.
2589 #ifndef _WIN64
2590 arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,
2591 rax, L_failed);
2592 #else
2593 __ movl(r11_length, C_RARG4); // reload
2594 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
2595 rax, L_failed);
2596 __ movq(r11_dst_klass, dst_klass_addr); // reload (64-bit klass pointer)
2597 #endif
2598
2599 // Marshal the base address arguments now, freeing registers.
2600 __ leaq(from, Address(src, src_pos, Address::times_8,
2601 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2602 __ leaq(to, Address(dst, dst_pos, Address::times_8,
2603 arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
2604 __ movl(count, C_RARG4); // length (reloaded)
2605 Register sco_temp = c_rarg3; // this register is free now
2606 assert_different_registers(from, to, count, sco_temp,
2607 r11_dst_klass, r10_src_klass);
2608 assert_clean_int(count, sco_temp);
2609
2610 // Generate the type check.
2611 int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
2612 Klass::super_check_offset_offset_in_bytes());
2613 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2614 assert_clean_int(sco_temp, rax);
2615 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
2616
2617 // Fetch destination element klass from the objArrayKlass header.
2618 int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
2619 objArrayKlass::element_klass_offset_in_bytes());
2620 __ movq(r11_dst_klass, Address(r11_dst_klass, ek_offset));
2621 __ movl(sco_temp, Address(r11_dst_klass, sco_offset));
2622 assert_clean_int(sco_temp, rax);
2623
2624 // the checkcast_copy loop needs two extra arguments:
2625 assert(c_rarg3 == sco_temp, "#3 already in place");
2626 __ movq(C_RARG4, r11_dst_klass); // dst.klass.element_klass
2627 __ jump(RuntimeAddress(checkcast_copy_entry));
2628 }
2629
2630 __ BIND(L_failed);
2631 __ xorq(rax, rax);
2632 __ notq(rax); // return -1
2633 __ leave(); // required for proper stackwalking of RuntimeStub frame
2634 __ ret(0);
2635
2636 return start;
2637 }
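// Editorial summary of the dispatch just generated:
//
//   if (any of checks (1)..(8) above fails)  return -1;   // ~0: nothing copied
//   if (typeArray)                tail-call the {byte,short,int,long} copy stub;
//   else if (src and dst are the same objArray klass)     tail-call oop_copy;
//   else                          tail-call checkcast_copy with ckoff/ckval;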
2638
2639 #undef C_RARG4
2640
2641 void generate_arraycopy_stubs() {
2642 // Call the conjoint generation methods immediately after
2643 // the disjoint ones so that short branches from the former
2644 // to the latter can be generated.
2645 StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2646 StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2647
2648 StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2649 StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
2650
2651 StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
2652 StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy");
2653
2654 StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy");
2655 StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy");
2656
2657 StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy");
2658 StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy");
2659
2660 StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
2661 StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy");
2662 StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy");
2663
2664 // We don't generate specialized code for HeapWord-aligned source
2665 // arrays, so just use the code we've already generated
2666 StubRoutines::_arrayof_jbyte_disjoint_arraycopy = StubRoutines::_jbyte_disjoint_arraycopy;
2667 StubRoutines::_arrayof_jbyte_arraycopy = StubRoutines::_jbyte_arraycopy;
2668
2669 StubRoutines::_arrayof_jshort_disjoint_arraycopy = StubRoutines::_jshort_disjoint_arraycopy;
2670 StubRoutines::_arrayof_jshort_arraycopy = StubRoutines::_jshort_arraycopy;
2671
2672 StubRoutines::_arrayof_jint_disjoint_arraycopy = StubRoutines::_jint_disjoint_arraycopy;
2673 StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
2674
2675 StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy;
2676 StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
2677
2678 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy;
2679 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
2680 }
2681
2682 #undef __
2683 #define __ masm->
2684
2685 // Continuation point for throwing of implicit exceptions that are
2686 // not handled in the current activation. Fabricates an exception
2687 // oop and initiates normal exception dispatching in this
2688 // frame. Since we need to preserve callee-saved values (currently
2689 // only for C2, but done for C1 as well) we need a callee-saved oop
2690 // map and therefore have to make these stubs into RuntimeStubs
2691 // rather than BufferBlobs. If the compiler needs all registers to
2692 // be preserved between the fault point and the exception handler
2693 // then it must assume responsibility for that in
2694 // AbstractCompiler::continuation_for_implicit_null_exception or
2695 // continuation_for_implicit_division_by_zero_exception. All other
2696 // implicit exceptions (e.g., NullPointerException or
2697 // AbstractMethodError on entry) are either at call sites or
2698 // otherwise assume that stack unwinding will be initiated, so
2699 // caller saved registers were assumed volatile in the compiler.
2700 address generate_throw_exception(const char* name,
2701 address runtime_entry,
2702 bool restore_saved_exception_pc) {
2703 // Information about frame layout at time of blocking runtime call.
2704 // Note that we only have to preserve callee-saved registers since
2705 // the compilers are responsible for supplying a continuation point
2706 // if they expect all registers to be preserved.
2707 enum layout {
2708 rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
2709 rbp_off2,
2710 return_off,
2711 return_off2,
2712 framesize // inclusive of return address
2713 };
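// A worked instance (assuming frame::arg_reg_save_area_bytes == 0, as on
// Linux): rbp_off == 0, return_off == 2, framesize == 4 32-bit slots, i.e.
// 16 bytes, so rsp stays 16-byte aligned as asserted below.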
2714
2715 int insts_size = 512;
2716 int locs_size = 64;
2717
2718 CodeBuffer code(name, insts_size, locs_size);
2719 OopMapSet* oop_maps = new OopMapSet();
2720 MacroAssembler* masm = new MacroAssembler(&code);
2721
2722 address start = __ pc();
2723
2724 // This is an inlined and slightly modified version of call_VM
2725 // which has the ability to fetch the return PC out of
2726 // thread-local storage and also sets up last_Java_sp slightly
2727 // differently than the real call_VM
2728 if (restore_saved_exception_pc) {
2729 __ movq(rax,
2730 Address(r15_thread,
2731 in_bytes(JavaThread::saved_exception_pc_offset())));
2732 __ pushq(rax);
2733 }
2734
2735 __ enter(); // required for proper stackwalking of RuntimeStub frame
2736
2737 assert(is_even(framesize/2), "sp not 16-byte aligned");
2738
2739 // return address and rbp are already in place
2740 __ subq(rsp, (framesize-4) << LogBytesPerInt); // prolog
2741
2742 int frame_complete = __ pc() - start;
2743
2744 // Set up last_Java_sp and last_Java_fp
2745 __ set_last_Java_frame(rsp, rbp, NULL);
2746
2747 // Call runtime
2748 __ movq(c_rarg0, r15_thread);
2749 BLOCK_COMMENT("call runtime_entry");
2750 __ call(RuntimeAddress(runtime_entry));
2751
2752 // Generate oop map
2753 OopMap* map = new OopMap(framesize, 0);
2754
2755 oop_maps->add_gc_map(__ pc() - start, map);
2756
2757 __ reset_last_Java_frame(true, false);
2758
2759 __ leave(); // required for proper stackwalking of RuntimeStub frame
2760
2761 // check for pending exceptions
2762 #ifdef ASSERT
2763 Label L;
2764 __ cmpq(Address(r15_thread, Thread::pending_exception_offset()),
2765 (int) NULL);
2766 __ jcc(Assembler::notEqual, L);
2767 __ should_not_reach_here();
2768 __ bind(L);
2769 #endif // ASSERT
2770 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2771
2772
2773 // codeBlob framesize is in words (not VMRegImpl::slot_size)
2774 RuntimeStub* stub =
2775 RuntimeStub::new_runtime_stub(name,
2776 &code,
2777 frame_complete,
2778 (framesize >> (LogBytesPerWord - LogBytesPerInt)),
2779 oop_maps, false);
2780 return stub->entry_point();
2781 }
2782
2783 // Initialization
2784 void generate_initial() {
2785 // Generates all stubs and initializes the entry points
2786
2787 // This platform-specific stub is needed by generate_call_stub()
2788 StubRoutines::amd64::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
2789
2790 // entry points that exist in all platforms. Note: this is code
2791 // that could be shared among different platforms - however the
2792 // benefit seems to be smaller than the disadvantage of having a
2793 // much more complicated generator structure. See also comment in
2794 // stubRoutines.hpp.
2795
2796 StubRoutines::_forward_exception_entry = generate_forward_exception();
2797
2798 StubRoutines::_call_stub_entry =
2799 generate_call_stub(StubRoutines::_call_stub_return_address);
2800
2801 // is referenced by megamorphic call
2802 StubRoutines::_catch_exception_entry = generate_catch_exception();
2803
2804 // atomic calls
2805 StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
2806 StubRoutines::_atomic_xchg_ptr_entry = generate_atomic_xchg_ptr();
2807 StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
2808 StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
2809 StubRoutines::_atomic_add_entry = generate_atomic_add();
2810 StubRoutines::_atomic_add_ptr_entry = generate_atomic_add_ptr();
2811 StubRoutines::_fence_entry = generate_orderaccess_fence();
2812
2813 StubRoutines::_handler_for_unsafe_access_entry =
2814 generate_handler_for_unsafe_access();
2815
2816 // platform dependent
2817 StubRoutines::amd64::_get_previous_fp_entry = generate_get_previous_fp();
2818
2819 StubRoutines::amd64::_verify_mxcsr_entry = generate_verify_mxcsr();
2820 }
2821
2822 void generate_all() {
2823 // Generates all stubs and initializes the entry points
2824
2825 // These entry points require SharedInfo::stack0 to be set up in
2826 // non-core builds and need to be relocatable, so they each
2827 // fabricate a RuntimeStub internally.
2828 StubRoutines::_throw_AbstractMethodError_entry =
2829 generate_throw_exception("AbstractMethodError throw_exception",
2830 CAST_FROM_FN_PTR(address,
2831 SharedRuntime::
2832 throw_AbstractMethodError),
2833 false);
2834
2835 StubRoutines::_throw_ArithmeticException_entry =
2836 generate_throw_exception("ArithmeticException throw_exception",
2837 CAST_FROM_FN_PTR(address,
2838 SharedRuntime::
2839 throw_ArithmeticException),
2840 true);
2841
2842 StubRoutines::_throw_NullPointerException_entry =
2843 generate_throw_exception("NullPointerException throw_exception",
2844 CAST_FROM_FN_PTR(address,
2845 SharedRuntime::
2846 throw_NullPointerException),
2847 true);
2848
2849 StubRoutines::_throw_NullPointerException_at_call_entry =
2850 generate_throw_exception("NullPointerException at call throw_exception",
2851 CAST_FROM_FN_PTR(address,
2852 SharedRuntime::
2853 throw_NullPointerException_at_call),
2854 false);
2855
2856 StubRoutines::_throw_StackOverflowError_entry =
2857 generate_throw_exception("StackOverflowError throw_exception",
2858 CAST_FROM_FN_PTR(address,
2859 SharedRuntime::
2860 throw_StackOverflowError),
2861 false);
2862
2863 // entry points that are platform specific
2864 StubRoutines::amd64::_f2i_fixup = generate_f2i_fixup();
2865 StubRoutines::amd64::_f2l_fixup = generate_f2l_fixup();
2866 StubRoutines::amd64::_d2i_fixup = generate_d2i_fixup();
2867 StubRoutines::amd64::_d2l_fixup = generate_d2l_fixup();
2868
2869 StubRoutines::amd64::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
2870 StubRoutines::amd64::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
2871 StubRoutines::amd64::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
2872 StubRoutines::amd64::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
2873
2874 // support for verify_oop (must happen after universe_init)
2875 StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
2876
2877 // arraycopy stubs used by compilers
2878 generate_arraycopy_stubs();
2879 }
2880
2881 public:
2882 StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
2883 if (all) {
2884 generate_all();
2885 } else {
2886 generate_initial();
2887 }
2888 }
2889 }; // end class declaration
2890
2891 address StubGenerator::disjoint_byte_copy_entry = NULL;
2892 address StubGenerator::disjoint_short_copy_entry = NULL;
2893 address StubGenerator::disjoint_int_copy_entry = NULL;
2894 address StubGenerator::disjoint_long_copy_entry = NULL;
2895 address StubGenerator::disjoint_oop_copy_entry = NULL;
2896
2897 address StubGenerator::byte_copy_entry = NULL;
2898 address StubGenerator::short_copy_entry = NULL;
2899 address StubGenerator::int_copy_entry = NULL;
2900 address StubGenerator::long_copy_entry = NULL;
2901 address StubGenerator::oop_copy_entry = NULL;
2902
2903 address StubGenerator::checkcast_copy_entry = NULL;
2904
2905 void StubGenerator_generate(CodeBuffer* code, bool all) {
2906 StubGenerator g(code, all);
2907 }