comparison: src/cpu/x86/vm/templateTable_x86_64.cpp @ 0:a61af66fc99e (jdk7-b24)
Initial load

author | duke |
---|---|
date | Sat, 01 Dec 2007 00:00:00 +0000 |
parents | (none) |
children | ba764ed4b6f2 |
1 /* | |
2 * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. | |
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_templateTable_x86_64.cpp.incl" | |
27 | |
28 #define __ _masm-> | |
29 | |
30 // Platform-dependent initialization | |
31 | |
32 void TemplateTable::pd_initialize() { | |
33 // No amd64 specific initialization | |
34 } | |
35 | |
36 // Address computation: local variables | |
37 | |
38 static inline Address iaddress(int n) { | |
39 return Address(r14, Interpreter::local_offset_in_bytes(n)); | |
40 } | |
41 | |
42 static inline Address laddress(int n) { | |
43 return iaddress(n + 1); | |
44 } | |
45 | |
46 static inline Address faddress(int n) { | |
47 return iaddress(n); | |
48 } | |
49 | |
50 static inline Address daddress(int n) { | |
51 return laddress(n); | |
52 } | |
53 | |
54 static inline Address aaddress(int n) { | |
55 return iaddress(n); | |
56 } | |
57 | |
58 static inline Address iaddress(Register r) { | |
59 return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes()); | |
60 } | |
61 | |
62 static inline Address laddress(Register r) { | |
63 return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1)); | |
64 } | |
65 | |
66 static inline Address faddress(Register r) { | |
67 return iaddress(r); | |
68 } | |
69 | |
70 static inline Address daddress(Register r) { | |
71 return laddress(r); | |
72 } | |
73 | |
74 static inline Address aaddress(Register r) { | |
75 return iaddress(r); | |
76 } | |
77 | |
78 static inline Address at_rsp() { | |
79 return Address(rsp, 0); | |
80 } | |
81 | |
82 // At the top of the Java expression stack, which may be different from | |
83 // rsp(). It isn't different for category 1 objects. | |
84 static inline Address at_tos () { | |
85 return Address(rsp, Interpreter::expr_offset_in_bytes(0)); | |
86 } | |
87 | |
88 static inline Address at_tos_p1() { | |
89 return Address(rsp, Interpreter::expr_offset_in_bytes(1)); | |
90 } | |
91 | |
92 static inline Address at_tos_p2() { | |
93 return Address(rsp, Interpreter::expr_offset_in_bytes(2)); | |
94 } | |
95 | |
96 static inline Address at_tos_p3() { | |
97 return Address(rsp, Interpreter::expr_offset_in_bytes(3)); | |
98 } | |
99 | |
100 // Condition conversion: returns the negated condition, so callers can branch to the not-taken path | |
101 static Assembler::Condition j_not(TemplateTable::Condition cc) { | |
102 switch (cc) { | |
103 case TemplateTable::equal : return Assembler::notEqual; | |
104 case TemplateTable::not_equal : return Assembler::equal; | |
105 case TemplateTable::less : return Assembler::greaterEqual; | |
106 case TemplateTable::less_equal : return Assembler::greater; | |
107 case TemplateTable::greater : return Assembler::lessEqual; | |
108 case TemplateTable::greater_equal: return Assembler::less; | |
109 } | |
110 ShouldNotReachHere(); | |
111 return Assembler::zero; | |
112 } | |
113 | |
114 | |
115 // Miscellaneous helper routines | |
116 | |
117 Address TemplateTable::at_bcp(int offset) { | |
118 assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); | |
119 return Address(r13, offset); | |
120 } | |
121 | |
122 void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc, | |
123 Register scratch, | |
124 bool load_bc_into_scratch/*=true*/) { | |
125 if (!RewriteBytecodes) { | |
126 return; | |
127 } | |
128 // the pair bytecodes have already done the load. | |
129 if (load_bc_into_scratch) { | |
130 __ movl(bc, bytecode); | |
131 } | |
132 Label patch_done; | |
133 if (JvmtiExport::can_post_breakpoint()) { | |
134 Label fast_patch; | |
135 // if a breakpoint is present we can't rewrite the stream directly | |
136 __ movzbl(scratch, at_bcp(0)); | |
137 __ cmpl(scratch, Bytecodes::_breakpoint); | |
138 __ jcc(Assembler::notEqual, fast_patch); | |
139 __ get_method(scratch); | |
140 // Let breakpoint table handling rewrite to quicker bytecode | |
141 __ call_VM(noreg, | |
142 CAST_FROM_FN_PTR(address, | |
143 InterpreterRuntime::set_original_bytecode_at), | |
144 scratch, r13, bc); | |
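    // The two branches below differ only in jump width: in product builds a
    // short jump (jmpb) suffices to reach patch_done, while under ASSERT the
    // verification code in between presumably pushes the target out of the
    // 8-bit displacement range of jmpb, so a full jmp is used instead.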
145 #ifndef ASSERT | |
146 __ jmpb(patch_done); | |
147 __ bind(fast_patch); | |
148 } | |
149 #else | |
150 __ jmp(patch_done); | |
151 __ bind(fast_patch); | |
152 } | |
153 Label okay; | |
154 __ load_unsigned_byte(scratch, at_bcp(0)); | |
155 __ cmpl(scratch, (int) Bytecodes::java_code(bytecode)); | |
156 __ jcc(Assembler::equal, okay); | |
157 __ cmpl(scratch, bc); | |
158 __ jcc(Assembler::equal, okay); | |
159 __ stop("patching the wrong bytecode"); | |
160 __ bind(okay); | |
161 #endif | |
162 // patch bytecode | |
163 __ movb(at_bcp(0), bc); | |
164 __ bind(patch_done); | |
165 } | |
166 | |
167 | |
168 // Individual instructions | |
169 | |
170 void TemplateTable::nop() { | |
171 transition(vtos, vtos); | |
172 // nothing to do | |
173 } | |
174 | |
175 void TemplateTable::shouldnotreachhere() { | |
176 transition(vtos, vtos); | |
177 __ stop("shouldnotreachhere bytecode"); | |
178 } | |
179 | |
180 void TemplateTable::aconst_null() { | |
181 transition(vtos, atos); | |
182 __ xorl(rax, rax); | |
183 } | |
184 | |
185 void TemplateTable::iconst(int value) { | |
186 transition(vtos, itos); | |
187 if (value == 0) { | |
188 __ xorl(rax, rax); | |
189 } else { | |
190 __ movl(rax, value); | |
191 } | |
192 } | |
193 | |
194 void TemplateTable::lconst(int value) { | |
195 transition(vtos, ltos); | |
196 if (value == 0) { | |
197 __ xorl(rax, rax); | |
198 } else { | |
199 __ movl(rax, value); | |
200 } | |
201 } | |
202 | |
203 void TemplateTable::fconst(int value) { | |
204 transition(vtos, ftos); | |
205 static float one = 1.0f, two = 2.0f; | |
206 switch (value) { | |
207 case 0: | |
208 __ xorps(xmm0, xmm0); | |
209 break; | |
210 case 1: | |
211 __ movflt(xmm0, ExternalAddress((address) &one)); | |
212 break; | |
213 case 2: | |
214 __ movflt(xmm0, ExternalAddress((address) &two)); | |
215 break; | |
216 default: | |
217 ShouldNotReachHere(); | |
218 break; | |
219 } | |
220 } | |
221 | |
222 void TemplateTable::dconst(int value) { | |
223 transition(vtos, dtos); | |
224 static double one = 1.0; | |
225 switch (value) { | |
226 case 0: | |
227 __ xorpd(xmm0, xmm0); | |
228 break; | |
229 case 1: | |
230 __ movdbl(xmm0, ExternalAddress((address) &one)); | |
231 break; | |
232 default: | |
233 ShouldNotReachHere(); | |
234 break; | |
235 } | |
236 } | |
237 | |
238 void TemplateTable::bipush() { | |
239 transition(vtos, itos); | |
240 __ load_signed_byte(rax, at_bcp(1)); | |
241 } | |
242 | |
243 void TemplateTable::sipush() { | |
244 transition(vtos, itos); | |
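  // The 16-bit operand is stored big-endian in the bytecode stream. A worked
  // example (values illustrative): for the operand -2 the stream holds
  // 0xFF 0xFE, so the zero-extending 16-bit load yields 0x0000FEFF, bswapl
  // gives 0xFFFE0000, and the arithmetic shift right by 16 produces
  // 0xFFFFFFFE, i.e. the sign-extended value -2 in eax.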
245 __ load_unsigned_word(rax, at_bcp(1)); | |
246 __ bswapl(rax); | |
247 __ sarl(rax, 16); | |
248 } | |
249 | |
250 void TemplateTable::ldc(bool wide) { | |
251 transition(vtos, vtos); | |
252 Label call_ldc, notFloat, notClass, Done; | |
253 | |
254 if (wide) { | |
255 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); | |
256 } else { | |
257 __ load_unsigned_byte(rbx, at_bcp(1)); | |
258 } | |
259 | |
260 __ get_cpool_and_tags(rcx, rax); | |
261 const int base_offset = constantPoolOopDesc::header_size() * wordSize; | |
262 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; | |
263 | |
264 // get type | |
265 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset)); | |
266 | |
267 // unresolved string - get the resolved string | |
268 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString); | |
269 __ jccb(Assembler::equal, call_ldc); | |
270 | |
271 // unresolved class - get the resolved class | |
272 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass); | |
273 __ jccb(Assembler::equal, call_ldc); | |
274 | |
275 // unresolved class in error state - call into runtime to throw the error | |
276 // from the first resolution attempt | |
277 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError); | |
278 __ jccb(Assembler::equal, call_ldc); | |
279 | |
280 // resolved class - need to call vm to get java mirror of the class | |
281 __ cmpl(rdx, JVM_CONSTANT_Class); | |
282 __ jcc(Assembler::notEqual, notClass); | |
283 | |
284 __ bind(call_ldc); | |
285 __ movl(c_rarg1, wide); | |
286 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1); | |
287 __ push_ptr(rax); | |
288 __ verify_oop(rax); | |
289 __ jmp(Done); | |
290 | |
291 __ bind(notClass); | |
292 __ cmpl(rdx, JVM_CONSTANT_Float); | |
293 __ jccb(Assembler::notEqual, notFloat); | |
294 // ftos | |
295 __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset)); | |
296 __ push_f(); | |
297 __ jmp(Done); | |
298 | |
299 __ bind(notFloat); | |
300 #ifdef ASSERT | |
301 { | |
302 Label L; | |
303 __ cmpl(rdx, JVM_CONSTANT_Integer); | |
304 __ jcc(Assembler::equal, L); | |
305 __ cmpl(rdx, JVM_CONSTANT_String); | |
306 __ jcc(Assembler::equal, L); | |
307 __ stop("unexpected tag type in ldc"); | |
308 __ bind(L); | |
309 } | |
310 #endif | |
311 // atos and itos | |
312 Label isOop; | |
313 __ cmpl(rdx, JVM_CONSTANT_Integer); | |
314 __ jcc(Assembler::notEqual, isOop); | |
315 __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset)); | |
316 __ push_i(rax); | |
317 __ jmp(Done); | |
318 | |
319 __ bind(isOop); | |
320 __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset)); | |
321 __ push_ptr(rax); | |
322 | |
323 if (VerifyOops) { | |
324 __ verify_oop(rax); | |
325 } | |
326 | |
327 __ bind(Done); | |
328 } | |
329 | |
330 void TemplateTable::ldc2_w() { | |
331 transition(vtos, vtos); | |
332 Label Long, Done; | |
333 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); | |
334 | |
335 __ get_cpool_and_tags(rcx, rax); | |
336 const int base_offset = constantPoolOopDesc::header_size() * wordSize; | |
337 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; | |
338 | |
339 // get type | |
340 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), | |
341 JVM_CONSTANT_Double); | |
342 __ jccb(Assembler::notEqual, Long); | |
343 // dtos | |
344 __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset)); | |
345 __ push_d(); | |
346 __ jmpb(Done); | |
347 | |
348 __ bind(Long); | |
349 // ltos | |
350 __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset)); | |
351 __ push_l(); | |
352 | |
353 __ bind(Done); | |
354 } | |
355 | |
356 void TemplateTable::locals_index(Register reg, int offset) { | |
357 __ load_unsigned_byte(reg, at_bcp(offset)); | |
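  // The index is negated because locals live at negative multiples of the
  // word size relative to r14 (see iaddress(Register)); with the tagged
  // stack interpreter each local occupies a tag/value pair, hence the
  // doubling below.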
358 __ negq(reg); | |
359 if (TaggedStackInterpreter) __ shlq(reg, 1); // index = index*2 | |
360 } | |
361 | |
362 void TemplateTable::iload() { | |
363 transition(vtos, itos); | |
364 if (RewriteFrequentPairs) { | |
365 Label rewrite, done; | |
366 const Register bc = c_rarg3; | |
367 assert(rbx != bc, "register damaged"); | |
368 | |
369 // get next byte | |
370 __ load_unsigned_byte(rbx, | |
371 at_bcp(Bytecodes::length_for(Bytecodes::_iload))); | |
372 // if _iload, wait to rewrite; we only want to rewrite the last two | |
373 // iloads in a run. If the next bytecode is already _fast_iload, the one | |
374 // after it is neither an iload nor a caload, so these two form an | |
375 // iload pair and the current bytecode can be rewritten to _fast_iload2. | |
376 __ cmpl(rbx, Bytecodes::_iload); | |
377 __ jcc(Assembler::equal, done); | |
378 | |
379 __ cmpl(rbx, Bytecodes::_fast_iload); | |
380 __ movl(bc, Bytecodes::_fast_iload2); | |
381 __ jccb(Assembler::equal, rewrite); | |
382 | |
383 // if _caload, rewrite to fast_icaload | |
384 __ cmpl(rbx, Bytecodes::_caload); | |
385 __ movl(bc, Bytecodes::_fast_icaload); | |
386 __ jccb(Assembler::equal, rewrite); | |
387 | |
388 // rewrite so iload doesn't check again. | |
389 __ movl(bc, Bytecodes::_fast_iload); | |
390 | |
391 // rewrite | |
392 // bc: fast bytecode | |
393 __ bind(rewrite); | |
394 patch_bytecode(Bytecodes::_iload, bc, rbx, false); | |
395 __ bind(done); | |
396 } | |
397 | |
398 // Get the local value into tos | |
399 locals_index(rbx); | |
400 __ movl(rax, iaddress(rbx)); | |
401 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
402 } | |
403 | |
404 void TemplateTable::fast_iload2() { | |
405 transition(vtos, itos); | |
406 locals_index(rbx); | |
407 __ movl(rax, iaddress(rbx)); | |
408 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
409 __ push(itos); | |
410 locals_index(rbx, 3); | |
411 __ movl(rax, iaddress(rbx)); | |
412 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
413 } | |
414 | |
415 void TemplateTable::fast_iload() { | |
416 transition(vtos, itos); | |
417 locals_index(rbx); | |
418 __ movl(rax, iaddress(rbx)); | |
419 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
420 } | |
421 | |
422 void TemplateTable::lload() { | |
423 transition(vtos, ltos); | |
424 locals_index(rbx); | |
425 __ movq(rax, laddress(rbx)); | |
426 debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); | |
427 } | |
428 | |
429 void TemplateTable::fload() { | |
430 transition(vtos, ftos); | |
431 locals_index(rbx); | |
432 __ movflt(xmm0, faddress(rbx)); | |
433 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
434 } | |
435 | |
436 void TemplateTable::dload() { | |
437 transition(vtos, dtos); | |
438 locals_index(rbx); | |
439 __ movdbl(xmm0, daddress(rbx)); | |
440 debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); | |
441 } | |
442 | |
443 void TemplateTable::aload() { | |
444 transition(vtos, atos); | |
445 locals_index(rbx); | |
446 __ movq(rax, aaddress(rbx)); | |
447 debug_only(__ verify_local_tag(frame::TagReference, rbx)); | |
448 } | |
449 | |
450 void TemplateTable::locals_index_wide(Register reg) { | |
451 __ movl(reg, at_bcp(2)); | |
452 __ bswapl(reg); | |
453 __ shrl(reg, 16); | |
454 __ negq(reg); | |
455 if (TaggedStackInterpreter) __ shlq(reg, 1); // index = index*2 | |
456 } | |
457 | |
458 void TemplateTable::wide_iload() { | |
459 transition(vtos, itos); | |
460 locals_index_wide(rbx); | |
461 __ movl(rax, iaddress(rbx)); | |
462 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
463 } | |
464 | |
465 void TemplateTable::wide_lload() { | |
466 transition(vtos, ltos); | |
467 locals_index_wide(rbx); | |
468 __ movq(rax, laddress(rbx)); | |
469 debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); | |
470 } | |
471 | |
472 void TemplateTable::wide_fload() { | |
473 transition(vtos, ftos); | |
474 locals_index_wide(rbx); | |
475 __ movflt(xmm0, faddress(rbx)); | |
476 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
477 } | |
478 | |
479 void TemplateTable::wide_dload() { | |
480 transition(vtos, dtos); | |
481 locals_index_wide(rbx); | |
482 __ movdbl(xmm0, daddress(rbx)); | |
483 debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); | |
484 } | |
485 | |
486 void TemplateTable::wide_aload() { | |
487 transition(vtos, atos); | |
488 locals_index_wide(rbx); | |
489 __ movq(rax, aaddress(rbx)); | |
490 debug_only(__ verify_local_tag(frame::TagReference, rbx)); | |
491 } | |
492 | |
493 void TemplateTable::index_check(Register array, Register index) { | |
494 // destroys rbx | |
495 // check array | |
496 __ null_check(array, arrayOopDesc::length_offset_in_bytes()); | |
497 // sign extend index for use by indexed load | |
498 __ movslq(index, index); | |
499 // check index | |
500 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes())); | |
501 if (index != rbx) { | |
502 // ??? convention: move aberrant index into ebx for exception message | |
503 assert(rbx != array, "different registers"); | |
504 __ movl(rbx, index); | |
505 } | |
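  // The unsigned aboveEqual branch below also catches negative indices,
  // since a negative index compares as a very large unsigned value.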
506 __ jump_cc(Assembler::aboveEqual, | |
507 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry)); | |
508 } | |
509 | |
510 void TemplateTable::iaload() { | |
511 transition(itos, itos); | |
512 __ pop_ptr(rdx); | |
513 // eax: index | |
514 // rdx: array | |
515 index_check(rdx, rax); // kills rbx | |
516 __ movl(rax, Address(rdx, rax, | |
517 Address::times_4, | |
518 arrayOopDesc::base_offset_in_bytes(T_INT))); | |
519 } | |
520 | |
521 void TemplateTable::laload() { | |
522 transition(itos, ltos); | |
523 __ pop_ptr(rdx); | |
524 // eax: index | |
525 // rdx: array | |
526 index_check(rdx, rax); // kills rbx (leaves a copy of the index in rbx) | |
527 __ movq(rax, Address(rdx, rbx, | |
528 Address::times_8, | |
529 arrayOopDesc::base_offset_in_bytes(T_LONG))); | |
530 } | |
531 | |
532 void TemplateTable::faload() { | |
533 transition(itos, ftos); | |
534 __ pop_ptr(rdx); | |
535 // eax: index | |
536 // rdx: array | |
537 index_check(rdx, rax); // kills rbx | |
538 __ movflt(xmm0, Address(rdx, rax, | |
539 Address::times_4, | |
540 arrayOopDesc::base_offset_in_bytes(T_FLOAT))); | |
541 } | |
542 | |
543 void TemplateTable::daload() { | |
544 transition(itos, dtos); | |
545 __ pop_ptr(rdx); | |
546 // eax: index | |
547 // rdx: array | |
548 index_check(rdx, rax); // kills rbx | |
549 __ movdbl(xmm0, Address(rdx, rax, | |
550 Address::times_8, | |
551 arrayOopDesc::base_offset_in_bytes(T_DOUBLE))); | |
552 } | |
553 | |
554 void TemplateTable::aaload() { | |
555 transition(itos, atos); | |
556 __ pop_ptr(rdx); | |
557 // eax: index | |
558 // rdx: array | |
559 index_check(rdx, rax); // kills rbx | |
560 __ movq(rax, Address(rdx, rax, | |
561 Address::times_8, | |
562 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); | |
563 } | |
564 | |
565 void TemplateTable::baload() { | |
566 transition(itos, itos); | |
567 __ pop_ptr(rdx); | |
568 // eax: index | |
569 // rdx: array | |
570 index_check(rdx, rax); // kills rbx | |
571 __ load_signed_byte(rax, | |
572 Address(rdx, rax, | |
573 Address::times_1, | |
574 arrayOopDesc::base_offset_in_bytes(T_BYTE))); | |
575 } | |
576 | |
577 void TemplateTable::caload() { | |
578 transition(itos, itos); | |
579 __ pop_ptr(rdx); | |
580 // eax: index | |
581 // rdx: array | |
582 index_check(rdx, rax); // kills rbx | |
583 __ load_unsigned_word(rax, | |
584 Address(rdx, rax, | |
585 Address::times_2, | |
586 arrayOopDesc::base_offset_in_bytes(T_CHAR))); | |
587 } | |
588 | |
589 // iload followed by caload frequent pair | |
590 void TemplateTable::fast_icaload() { | |
591 transition(vtos, itos); | |
592 // load index out of locals | |
593 locals_index(rbx); | |
594 __ movl(rax, iaddress(rbx)); | |
595 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
596 | |
597 // eax: index | |
598 // rdx: array | |
599 __ pop_ptr(rdx); | |
600 index_check(rdx, rax); // kills rbx | |
601 __ load_unsigned_word(rax, | |
602 Address(rdx, rax, | |
603 Address::times_2, | |
604 arrayOopDesc::base_offset_in_bytes(T_CHAR))); | |
605 } | |
606 | |
607 void TemplateTable::saload() { | |
608 transition(itos, itos); | |
609 __ pop_ptr(rdx); | |
610 // eax: index | |
611 // rdx: array | |
612 index_check(rdx, rax); // kills rbx | |
613 __ load_signed_word(rax, | |
614 Address(rdx, rax, | |
615 Address::times_2, | |
616 arrayOopDesc::base_offset_in_bytes(T_SHORT))); | |
617 } | |
618 | |
619 void TemplateTable::iload(int n) { | |
620 transition(vtos, itos); | |
621 __ movl(rax, iaddress(n)); | |
622 debug_only(__ verify_local_tag(frame::TagValue, n)); | |
623 } | |
624 | |
625 void TemplateTable::lload(int n) { | |
626 transition(vtos, ltos); | |
627 __ movq(rax, laddress(n)); | |
628 debug_only(__ verify_local_tag(frame::TagCategory2, n)); | |
629 } | |
630 | |
631 void TemplateTable::fload(int n) { | |
632 transition(vtos, ftos); | |
633 __ movflt(xmm0, faddress(n)); | |
634 debug_only(__ verify_local_tag(frame::TagValue, n)); | |
635 } | |
636 | |
637 void TemplateTable::dload(int n) { | |
638 transition(vtos, dtos); | |
639 __ movdbl(xmm0, daddress(n)); | |
640 debug_only(__ verify_local_tag(frame::TagCategory2, n)); | |
641 } | |
642 | |
643 void TemplateTable::aload(int n) { | |
644 transition(vtos, atos); | |
645 __ movq(rax, aaddress(n)); | |
646 debug_only(__ verify_local_tag(frame::TagReference, n)); | |
647 } | |
648 | |
649 void TemplateTable::aload_0() { | |
650 transition(vtos, atos); | |
651 // According to bytecode histograms, the pairs: | |
652 // | |
653 // _aload_0, _fast_igetfield | |
654 // _aload_0, _fast_agetfield | |
655 // _aload_0, _fast_fgetfield | |
656 // | |
657 // occur frequently. If RewriteFrequentPairs is set, the (slow) | |
658 // _aload_0 bytecode checks if the next bytecode is either | |
659 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then | |
660 // rewrites the current bytecode into a pair bytecode; otherwise it | |
661 // rewrites the current bytecode into _fast_aload_0, which doesn't do | |
662 // the pair check anymore. | |
663 // | |
664 // Note: If the next bytecode is _getfield, the rewrite must be | |
665 // delayed, otherwise we may miss an opportunity for a pair. | |
666 // | |
667 // Also rewrite frequent pairs | |
668 // aload_0, aload_1 | |
669 // aload_0, iload_1 | |
670 // These pairs require only a small amount of code and are the most | |
671 // profitable to rewrite. | |
672 if (RewriteFrequentPairs) { | |
673 Label rewrite, done; | |
674 const Register bc = c_rarg3; | |
675 assert(rbx != bc, "register damaged"); | |
676 // get next byte | |
677 __ load_unsigned_byte(rbx, | |
678 at_bcp(Bytecodes::length_for(Bytecodes::_aload_0))); | |
679 | |
680 // do actual aload_0 | |
681 aload(0); | |
682 | |
683 // if _getfield then wait with rewrite | |
684 __ cmpl(rbx, Bytecodes::_getfield); | |
685 __ jcc(Assembler::equal, done); | |
686 | |
687 // if _igetfield then rewrite to _fast_iaccess_0 | |
688 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == | |
689 Bytecodes::_aload_0, | |
690 "fix bytecode definition"); | |
691 __ cmpl(rbx, Bytecodes::_fast_igetfield); | |
692 __ movl(bc, Bytecodes::_fast_iaccess_0); | |
693 __ jccb(Assembler::equal, rewrite); | |
694 | |
695 // if _agetfield then rewrite to _fast_aaccess_0 | |
696 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == | |
697 Bytecodes::_aload_0, | |
698 "fix bytecode definition"); | |
699 __ cmpl(rbx, Bytecodes::_fast_agetfield); | |
700 __ movl(bc, Bytecodes::_fast_aaccess_0); | |
701 __ jccb(Assembler::equal, rewrite); | |
702 | |
703 // if _fgetfield then rewrite to _fast_faccess_0 | |
704 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == | |
705 Bytecodes::_aload_0, | |
706 "fix bytecode definition"); | |
707 __ cmpl(rbx, Bytecodes::_fast_fgetfield); | |
708 __ movl(bc, Bytecodes::_fast_faccess_0); | |
709 __ jccb(Assembler::equal, rewrite); | |
710 | |
711 // else rewrite to _fast_aload_0 | |
712 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == | |
713 Bytecodes::_aload_0, | |
714 "fix bytecode definition"); | |
715 __ movl(bc, Bytecodes::_fast_aload_0); | |
716 | |
717 // rewrite | |
718 // bc: fast bytecode | |
719 __ bind(rewrite); | |
720 patch_bytecode(Bytecodes::_aload_0, bc, rbx, false); | |
721 | |
722 __ bind(done); | |
723 } else { | |
724 aload(0); | |
725 } | |
726 } | |
727 | |
728 void TemplateTable::istore() { | |
729 transition(itos, vtos); | |
730 locals_index(rbx); | |
731 __ movl(iaddress(rbx), rax); | |
732 __ tag_local(frame::TagValue, rbx); | |
733 } | |
734 | |
735 void TemplateTable::lstore() { | |
736 transition(ltos, vtos); | |
737 locals_index(rbx); | |
738 __ movq(laddress(rbx), rax); | |
739 __ tag_local(frame::TagCategory2, rbx); | |
740 } | |
741 | |
742 void TemplateTable::fstore() { | |
743 transition(ftos, vtos); | |
744 locals_index(rbx); | |
745 __ movflt(faddress(rbx), xmm0); | |
746 __ tag_local(frame::TagValue, rbx); | |
747 } | |
748 | |
749 void TemplateTable::dstore() { | |
750 transition(dtos, vtos); | |
751 locals_index(rbx); | |
752 __ movdbl(daddress(rbx), xmm0); | |
753 __ tag_local(frame::TagCategory2, rbx); | |
754 } | |
755 | |
756 void TemplateTable::astore() { | |
757 transition(vtos, vtos); | |
758 __ pop_ptr(rax, rdx); // will need to pop tag too | |
759 locals_index(rbx); | |
760 __ movq(aaddress(rbx), rax); | |
761 __ tag_local(rdx, rbx); // store tag from stack, might be returnAddr | |
762 } | |
763 | |
764 void TemplateTable::wide_istore() { | |
765 transition(vtos, vtos); | |
766 __ pop_i(); | |
767 locals_index_wide(rbx); | |
768 __ movl(iaddress(rbx), rax); | |
769 __ tag_local(frame::TagValue, rbx); | |
770 } | |
771 | |
772 void TemplateTable::wide_lstore() { | |
773 transition(vtos, vtos); | |
774 __ pop_l(); | |
775 locals_index_wide(rbx); | |
776 __ movq(laddress(rbx), rax); | |
777 __ tag_local(frame::TagCategory2, rbx); | |
778 } | |
779 | |
780 void TemplateTable::wide_fstore() { | |
781 transition(vtos, vtos); | |
782 __ pop_f(); | |
783 locals_index_wide(rbx); | |
784 __ movflt(faddress(rbx), xmm0); | |
785 __ tag_local(frame::TagValue, rbx); | |
786 } | |
787 | |
788 void TemplateTable::wide_dstore() { | |
789 transition(vtos, vtos); | |
790 __ pop_d(); | |
791 locals_index_wide(rbx); | |
792 __ movdbl(daddress(rbx), xmm0); | |
793 __ tag_local(frame::TagCategory2, rbx); | |
794 } | |
795 | |
796 void TemplateTable::wide_astore() { | |
797 transition(vtos, vtos); | |
798 __ pop_ptr(rax, rdx); // will need to pop tag too | |
799 locals_index_wide(rbx); | |
800 __ movq(aaddress(rbx), rax); | |
801 __ tag_local(rdx, rbx); // store tag from stack, might be returnAddr | |
802 } | |
803 | |
804 void TemplateTable::iastore() { | |
805 transition(itos, vtos); | |
806 __ pop_i(rbx); | |
807 __ pop_ptr(rdx); | |
808 // eax: value | |
809 // ebx: index | |
810 // rdx: array | |
811 index_check(rdx, rbx); // prefer index in ebx | |
812 __ movl(Address(rdx, rbx, | |
813 Address::times_4, | |
814 arrayOopDesc::base_offset_in_bytes(T_INT)), | |
815 rax); | |
816 } | |
817 | |
818 void TemplateTable::lastore() { | |
819 transition(ltos, vtos); | |
820 __ pop_i(rbx); | |
821 __ pop_ptr(rdx); | |
822 // rax: value | |
823 // ebx: index | |
824 // rdx: array | |
825 index_check(rdx, rbx); // prefer index in ebx | |
826 __ movq(Address(rdx, rbx, | |
827 Address::times_8, | |
828 arrayOopDesc::base_offset_in_bytes(T_LONG)), | |
829 rax); | |
830 } | |
831 | |
832 void TemplateTable::fastore() { | |
833 transition(ftos, vtos); | |
834 __ pop_i(rbx); | |
835 __ pop_ptr(rdx); | |
836 // xmm0: value | |
837 // ebx: index | |
838 // rdx: array | |
839 index_check(rdx, rbx); // prefer index in ebx | |
840 __ movflt(Address(rdx, rbx, | |
841 Address::times_4, | |
842 arrayOopDesc::base_offset_in_bytes(T_FLOAT)), | |
843 xmm0); | |
844 } | |
845 | |
846 void TemplateTable::dastore() { | |
847 transition(dtos, vtos); | |
848 __ pop_i(rbx); | |
849 __ pop_ptr(rdx); | |
850 // xmm0: value | |
851 // ebx: index | |
852 // rdx: array | |
853 index_check(rdx, rbx); // prefer index in ebx | |
854 __ movdbl(Address(rdx, rbx, | |
855 Address::times_8, | |
856 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)), | |
857 xmm0); | |
858 } | |
859 | |
860 void TemplateTable::aastore() { | |
861 Label is_null, ok_is_subtype, done; | |
862 transition(vtos, vtos); | |
863 // stack: ..., array, index, value | |
864 __ movq(rax, at_tos()); // value | |
865 __ movl(rcx, at_tos_p1()); // index | |
866 __ movq(rdx, at_tos_p2()); // array | |
867 index_check(rdx, rcx); // kills rbx | |
868 // do array store check - check for NULL value first | |
869 __ testq(rax, rax); | |
870 __ jcc(Assembler::zero, is_null); | |
871 | |
872 // Move subklass into rbx | |
873 __ movq(rbx, Address(rax, oopDesc::klass_offset_in_bytes())); | |
874 // Move superklass into rax | |
875 __ movq(rax, Address(rdx, oopDesc::klass_offset_in_bytes())); | |
876 __ movq(rax, Address(rax, | |
877 sizeof(oopDesc) + | |
878 objArrayKlass::element_klass_offset_in_bytes())); | |
879 // Compress array + index*8 + the object-array base offset into a single register. Frees rcx. | |
880 __ leaq(rdx, Address(rdx, rcx, | |
881 Address::times_8, | |
882 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); | |
883 | |
884 // Generate subtype check. Blows rcx, rdi | |
885 // Superklass in rax. Subklass in rbx. | |
886 __ gen_subtype_check(rbx, ok_is_subtype); | |
887 | |
888 // Come here on failure | |
889 // object is at TOS | |
890 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry)); | |
891 | |
892 // Come here on success | |
893 __ bind(ok_is_subtype); | |
894 __ movq(rax, at_tos()); // Value | |
895 __ movq(Address(rdx, 0), rax); | |
896 __ store_check(rdx); | |
897 __ jmp(done); | |
898 | |
899 // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx] | |
900 __ bind(is_null); | |
901 __ profile_null_seen(rbx); | |
902 __ movq(Address(rdx, rcx, | |
903 Address::times_8, | |
904 arrayOopDesc::base_offset_in_bytes(T_OBJECT)), | |
905 rax); | |
906 | |
907 // Pop stack arguments | |
908 __ bind(done); | |
909 __ addq(rsp, 3 * Interpreter::stackElementSize()); | |
910 } | |
911 | |
912 void TemplateTable::bastore() { | |
913 transition(itos, vtos); | |
914 __ pop_i(rbx); | |
915 __ pop_ptr(rdx); | |
916 // eax: value | |
917 // ebx: index | |
918 // rdx: array | |
919 index_check(rdx, rbx); // prefer index in ebx | |
920 __ movb(Address(rdx, rbx, | |
921 Address::times_1, | |
922 arrayOopDesc::base_offset_in_bytes(T_BYTE)), | |
923 rax); | |
924 } | |
925 | |
926 void TemplateTable::castore() { | |
927 transition(itos, vtos); | |
928 __ pop_i(rbx); | |
929 __ pop_ptr(rdx); | |
930 // eax: value | |
931 // ebx: index | |
932 // rdx: array | |
933 index_check(rdx, rbx); // prefer index in ebx | |
934 __ movw(Address(rdx, rbx, | |
935 Address::times_2, | |
936 arrayOopDesc::base_offset_in_bytes(T_CHAR)), | |
937 rax); | |
938 } | |
939 | |
940 void TemplateTable::sastore() { | |
941 castore(); | |
942 } | |
943 | |
944 void TemplateTable::istore(int n) { | |
945 transition(itos, vtos); | |
946 __ movl(iaddress(n), rax); | |
947 __ tag_local(frame::TagValue, n); | |
948 } | |
949 | |
950 void TemplateTable::lstore(int n) { | |
951 transition(ltos, vtos); | |
952 __ movq(laddress(n), rax); | |
953 __ tag_local(frame::TagCategory2, n); | |
954 } | |
955 | |
956 void TemplateTable::fstore(int n) { | |
957 transition(ftos, vtos); | |
958 __ movflt(faddress(n), xmm0); | |
959 __ tag_local(frame::TagValue, n); | |
960 } | |
961 | |
962 void TemplateTable::dstore(int n) { | |
963 transition(dtos, vtos); | |
964 __ movdbl(daddress(n), xmm0); | |
965 __ tag_local(frame::TagCategory2, n); | |
966 } | |
967 | |
968 void TemplateTable::astore(int n) { | |
969 transition(vtos, vtos); | |
970 __ pop_ptr(rax, rdx); | |
971 __ movq(aaddress(n), rax); | |
972 __ tag_local(rdx, n); | |
973 } | |
974 | |
975 void TemplateTable::pop() { | |
976 transition(vtos, vtos); | |
977 __ addq(rsp, Interpreter::stackElementSize()); | |
978 } | |
979 | |
980 void TemplateTable::pop2() { | |
981 transition(vtos, vtos); | |
982 __ addq(rsp, 2 * Interpreter::stackElementSize()); | |
983 } | |
984 | |
985 void TemplateTable::dup() { | |
986 transition(vtos, vtos); | |
987 __ load_ptr_and_tag(0, rax, rdx); | |
988 __ push_ptr(rax, rdx); | |
989 // stack: ..., a, a | |
990 } | |
991 | |
992 void TemplateTable::dup_x1() { | |
993 transition(vtos, vtos); | |
994 // stack: ..., a, b | |
995 __ load_ptr_and_tag(0, rax, rdx); // load b | |
996 __ load_ptr_and_tag(1, rcx, rbx); // load a | |
997 __ store_ptr_and_tag(1, rax, rdx); // store b | |
998 __ store_ptr_and_tag(0, rcx, rbx); // store a | |
999 __ push_ptr(rax, rdx); // push b | |
1000 // stack: ..., b, a, b | |
1001 } | |
1002 | |
1003 void TemplateTable::dup_x2() { | |
1004 transition(vtos, vtos); | |
1005 // stack: ..., a, b, c | |
1006 __ load_ptr_and_tag(0, rax, rdx); // load c | |
1007 __ load_ptr_and_tag(2, rcx, rbx); // load a | |
1008 __ store_ptr_and_tag(2, rax, rdx); // store c in a | |
1009 __ push_ptr(rax, rdx); // push c | |
1010 // stack: ..., c, b, c, c | |
1011 __ load_ptr_and_tag(2, rax, rdx); // load b | |
1012 __ store_ptr_and_tag(2, rcx, rbx); // store a in b | |
1013 // stack: ..., c, a, c, c | |
1014 __ store_ptr_and_tag(1, rax, rdx); // store b in c | |
1015 // stack: ..., c, a, b, c | |
1016 } | |
1017 | |
1018 void TemplateTable::dup2() { | |
1019 transition(vtos, vtos); | |
1020 // stack: ..., a, b | |
1021 __ load_ptr_and_tag(1, rax, rdx); // load a | |
1022 __ push_ptr(rax, rdx); // push a | |
1023 __ load_ptr_and_tag(1, rax, rdx); // load b | |
1024 __ push_ptr(rax, rdx); // push b | |
1025 // stack: ..., a, b, a, b | |
1026 } | |
1027 | |
1028 void TemplateTable::dup2_x1() { | |
1029 transition(vtos, vtos); | |
1030 // stack: ..., a, b, c | |
1031 __ load_ptr_and_tag(0, rcx, rbx); // load c | |
1032 __ load_ptr_and_tag(1, rax, rdx); // load b | |
1033 __ push_ptr(rax, rdx); // push b | |
1034 __ push_ptr(rcx, rbx); // push c | |
1035 // stack: ..., a, b, c, b, c | |
1036 __ store_ptr_and_tag(3, rcx, rbx); // store c in b | |
1037 // stack: ..., a, c, c, b, c | |
1038 __ load_ptr_and_tag(4, rcx, rbx); // load a | |
1039 __ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c | |
1040 // stack: ..., a, c, a, b, c | |
1041 __ store_ptr_and_tag(4, rax, rdx); // store b in a | |
1042 // stack: ..., b, c, a, b, c | |
1043 } | |
1044 | |
1045 void TemplateTable::dup2_x2() { | |
1046 transition(vtos, vtos); | |
1047 // stack: ..., a, b, c, d | |
1048 __ load_ptr_and_tag(0, rcx, rbx); // load d | |
1049 __ load_ptr_and_tag(1, rax, rdx); // load c | |
1050 __ push_ptr(rax, rdx); // push c | |
1051 __ push_ptr(rcx, rbx); // push d | |
1052 // stack: ..., a, b, c, d, c, d | |
1053 __ load_ptr_and_tag(4, rax, rdx); // load b | |
1054 __ store_ptr_and_tag(2, rax, rdx); // store b in d | |
1055 __ store_ptr_and_tag(4, rcx, rbx); // store d in b | |
1056 // stack: ..., a, d, c, b, c, d | |
1057 __ load_ptr_and_tag(5, rcx, rbx); // load a | |
1058 __ load_ptr_and_tag(3, rax, rdx); // load c | |
1059 __ store_ptr_and_tag(3, rcx, rbx); // store a in c | |
1060 __ store_ptr_and_tag(5, rax, rdx); // store c in a | |
1061 // stack: ..., c, d, a, b, c, d | |
1062 } | |
1063 | |
1064 void TemplateTable::swap() { | |
1065 transition(vtos, vtos); | |
1066 // stack: ..., a, b | |
1067 __ load_ptr_and_tag(1, rcx, rbx); // load a | |
1068 __ load_ptr_and_tag(0, rax, rdx); // load b | |
1069 __ store_ptr_and_tag(0, rcx, rbx); // store a in b | |
1070 __ store_ptr_and_tag(1, rax, rdx); // store b in a | |
1071 // stack: ..., b, a | |
1072 } | |
1073 | |
1074 void TemplateTable::iop2(Operation op) { | |
1075 transition(itos, itos); | |
1076 switch (op) { | |
1077 case add : __ pop_i(rdx); __ addl (rax, rdx); break; | |
1078 case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break; | |
1079 case mul : __ pop_i(rdx); __ imull(rax, rdx); break; | |
1080 case _and : __ pop_i(rdx); __ andl (rax, rdx); break; | |
1081 case _or : __ pop_i(rdx); __ orl (rax, rdx); break; | |
1082 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break; | |
1083 case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break; | |
1084 case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break; | |
1085 case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break; | |
1086 default : ShouldNotReachHere(); | |
1087 } | |
1088 } | |
1089 | |
1090 void TemplateTable::lop2(Operation op) { | |
1091 transition(ltos, ltos); | |
1092 switch (op) { | |
1093 case add : __ pop_l(rdx); __ addq (rax, rdx); break; | |
1094 case sub : __ movq(rdx, rax); __ pop_l(rax); __ subq (rax, rdx); break; | |
1095 case _and : __ pop_l(rdx); __ andq (rax, rdx); break; | |
1096 case _or : __ pop_l(rdx); __ orq (rax, rdx); break; | |
1097 case _xor : __ pop_l(rdx); __ xorq (rax, rdx); break; | |
1098 default : ShouldNotReachHere(); | |
1099 } | |
1100 } | |
1101 | |
1102 void TemplateTable::idiv() { | |
1103 transition(itos, itos); | |
1104 __ movl(rcx, rax); | |
1105 __ pop_i(rax); | |
1106 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If | |
1107 // they are not equal, one could do a normal division (no correction | |
1108 // needed), which may speed up this implementation for the common case. | |
1109 // (see also JVM spec., p.243 & p.271) | |
1110 __ corrected_idivl(rcx); | |
1111 } | |
1112 | |
1113 void TemplateTable::irem() { | |
1114 transition(itos, itos); | |
1115 __ movl(rcx, rax); | |
1116 __ pop_i(rax); | |
1117 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If | |
1118 // they are not equal, one could do a normal division (no correction | |
1119 // needed), which may speed up this implementation for the common case. | |
1120 // (see also JVM spec., p.243 & p.271) | |
1121 __ corrected_idivl(rcx); | |
1122 __ movl(rax, rdx); | |
1123 } | |
1124 | |
1125 void TemplateTable::lmul() { | |
1126 transition(ltos, ltos); | |
1127 __ pop_l(rdx); | |
1128 __ imulq(rax, rdx); | |
1129 } | |
1130 | |
1131 void TemplateTable::ldiv() { | |
1132 transition(ltos, ltos); | |
1133 __ movq(rcx, rax); | |
1134 __ pop_l(rax); | |
1135 // generate explicit div0 check | |
1136 __ testq(rcx, rcx); | |
1137 __ jump_cc(Assembler::zero, | |
1138 ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); | |
1139 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If | |
1140 // they are not equal, one could do a normal division (no correction | |
1141 // needed), which may speed up this implementation for the common case. | |
1142 // (see also JVM spec., p.243 & p.271) | |
1143 __ corrected_idivq(rcx); // kills rbx | |
1144 } | |
1145 | |
1146 void TemplateTable::lrem() { | |
1147 transition(ltos, ltos); | |
1148 __ movq(rcx, rax); | |
1149 __ pop_l(rax); | |
1150 __ testq(rcx, rcx); | |
1151 __ jump_cc(Assembler::zero, | |
1152 ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); | |
1153 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If | |
1154 // they are not equal, one could do a normal division (no correction | |
1155 // needed), which may speed up this implementation for the common case. | |
1156 // (see also JVM spec., p.243 & p.271) | |
1157 __ corrected_idivq(rcx); // kills rbx | |
1158 __ movq(rax, rdx); | |
1159 } | |
1160 | |
1161 void TemplateTable::lshl() { | |
1162 transition(itos, ltos); | |
1163 __ movl(rcx, rax); // get shift count | |
1164 __ pop_l(rax); // get shift value | |
1165 __ shlq(rax); | |
1166 } | |
1167 | |
1168 void TemplateTable::lshr() { | |
1169 transition(itos, ltos); | |
1170 __ movl(rcx, rax); // get shift count | |
1171 __ pop_l(rax); // get shift value | |
1172 __ sarq(rax); | |
1173 } | |
1174 | |
1175 void TemplateTable::lushr() { | |
1176 transition(itos, ltos); | |
1177 __ movl(rcx, rax); // get shift count | |
1178 __ pop_l(rax); // get shift value | |
1179 __ shrq(rax); | |
1180 } | |
1181 | |
1182 void TemplateTable::fop2(Operation op) { | |
1183 transition(ftos, ftos); | |
1184 switch (op) { | |
1185 case add: | |
1186 __ addss(xmm0, at_rsp()); | |
1187 __ addq(rsp, Interpreter::stackElementSize()); | |
1188 break; | |
1189 case sub: | |
1190 __ movflt(xmm1, xmm0); | |
1191 __ pop_f(xmm0); | |
1192 __ subss(xmm0, xmm1); | |
1193 break; | |
1194 case mul: | |
1195 __ mulss(xmm0, at_rsp()); | |
1196 __ addq(rsp, Interpreter::stackElementSize()); | |
1197 break; | |
1198 case div: | |
1199 __ movflt(xmm1, xmm0); | |
1200 __ pop_f(xmm0); | |
1201 __ divss(xmm0, xmm1); | |
1202 break; | |
1203 case rem: | |
1204 __ movflt(xmm1, xmm0); | |
1205 __ pop_f(xmm0); | |
1206 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2); | |
1207 break; | |
1208 default: | |
1209 ShouldNotReachHere(); | |
1210 break; | |
1211 } | |
1212 } | |
1213 | |
1214 void TemplateTable::dop2(Operation op) { | |
1215 transition(dtos, dtos); | |
1216 switch (op) { | |
1217 case add: | |
1218 __ addsd(xmm0, at_rsp()); | |
1219 __ addq(rsp, 2 * Interpreter::stackElementSize()); | |
1220 break; | |
1221 case sub: | |
1222 __ movdbl(xmm1, xmm0); | |
1223 __ pop_d(xmm0); | |
1224 __ subsd(xmm0, xmm1); | |
1225 break; | |
1226 case mul: | |
1227 __ mulsd(xmm0, at_rsp()); | |
1228 __ addq(rsp, 2 * Interpreter::stackElementSize()); | |
1229 break; | |
1230 case div: | |
1231 __ movdbl(xmm1, xmm0); | |
1232 __ pop_d(xmm0); | |
1233 __ divsd(xmm0, xmm1); | |
1234 break; | |
1235 case rem: | |
1236 __ movdbl(xmm1, xmm0); | |
1237 __ pop_d(xmm0); | |
1238 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2); | |
1239 break; | |
1240 default: | |
1241 ShouldNotReachHere(); | |
1242 break; | |
1243 } | |
1244 } | |
1245 | |
1246 void TemplateTable::ineg() { | |
1247 transition(itos, itos); | |
1248 __ negl(rax); | |
1249 } | |
1250 | |
1251 void TemplateTable::lneg() { | |
1252 transition(ltos, ltos); | |
1253 __ negq(rax); | |
1254 } | |
1255 | |
1256 // Note: 'double' and 'long long' have 32-bit alignment on x86. | |
1257 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { | |
1258 // Use the expression (adr)&(~0xF) to provide a 128-bit-aligned address | |
1259 // for 128-bit operands of SSE instructions. | |
1260 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF))); | |
1261 // Store the value into a 128-bit operand. | |
1262 operand[0] = lo; | |
1263 operand[1] = hi; | |
1264 return operand; | |
1265 } | |
1266 | |
1267 // Buffer for 128-bit masks used by SSE instructions. | |
1268 static jlong float_signflip_pool[2*2]; | |
1269 static jlong double_signflip_pool[2*2]; | |
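// Each pool holds four jlongs (32 bytes) although only 16 bytes are used:
// &pool[1] is 8 bytes into the pool, so rounding it down to a 16-byte
// boundary with (adr)&(~0xF) always yields an address whose two quadwords
// still lie inside the pool, whatever its alignment (a sketch of the
// reasoning, assuming the array itself is at least 8-byte aligned).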
1270 | |
1271 void TemplateTable::fneg() { | |
1272 transition(ftos, ftos); | |
1273 static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000); | |
1274 __ xorps(xmm0, ExternalAddress((address) float_signflip)); | |
1275 } | |
1276 | |
1277 void TemplateTable::dneg() { | |
1278 transition(dtos, dtos); | |
1279 static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000); | |
1280 __ xorpd(xmm0, ExternalAddress((address) double_signflip)); | |
1281 } | |
1282 | |
1283 void TemplateTable::iinc() { | |
1284 transition(vtos, vtos); | |
1285 __ load_signed_byte(rdx, at_bcp(2)); // get constant | |
1286 locals_index(rbx); | |
1287 __ addl(iaddress(rbx), rdx); | |
1288 } | |
1289 | |
1290 void TemplateTable::wide_iinc() { | |
1291 transition(vtos, vtos); | |
1292 __ movl(rdx, at_bcp(4)); // get constant | |
1293 locals_index_wide(rbx); | |
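  // The movl above reads four bytes starting at the 16-bit increment
  // operand; the two extra bytes (which belong to the following bytecode)
  // end up in the low half after bswapl and are discarded by the arithmetic
  // shift, leaving the sign-extended big-endian constant in rdx.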
1294 __ bswapl(rdx); // swap bytes & sign-extend constant | |
1295 __ sarl(rdx, 16); | |
1296 __ addl(iaddress(rbx), rdx); | |
1297 // Note: should probably use only one movl to get both | |
1298 // the index and the constant -> fix this | |
1299 } | |
1300 | |
1301 void TemplateTable::convert() { | |
1302 // Checking | |
1303 #ifdef ASSERT | |
1304 { | |
1305 TosState tos_in = ilgl; | |
1306 TosState tos_out = ilgl; | |
1307 switch (bytecode()) { | |
1308 case Bytecodes::_i2l: // fall through | |
1309 case Bytecodes::_i2f: // fall through | |
1310 case Bytecodes::_i2d: // fall through | |
1311 case Bytecodes::_i2b: // fall through | |
1312 case Bytecodes::_i2c: // fall through | |
1313 case Bytecodes::_i2s: tos_in = itos; break; | |
1314 case Bytecodes::_l2i: // fall through | |
1315 case Bytecodes::_l2f: // fall through | |
1316 case Bytecodes::_l2d: tos_in = ltos; break; | |
1317 case Bytecodes::_f2i: // fall through | |
1318 case Bytecodes::_f2l: // fall through | |
1319 case Bytecodes::_f2d: tos_in = ftos; break; | |
1320 case Bytecodes::_d2i: // fall through | |
1321 case Bytecodes::_d2l: // fall through | |
1322 case Bytecodes::_d2f: tos_in = dtos; break; | |
1323 default : ShouldNotReachHere(); | |
1324 } | |
1325 switch (bytecode()) { | |
1326 case Bytecodes::_l2i: // fall through | |
1327 case Bytecodes::_f2i: // fall through | |
1328 case Bytecodes::_d2i: // fall through | |
1329 case Bytecodes::_i2b: // fall through | |
1330 case Bytecodes::_i2c: // fall through | |
1331 case Bytecodes::_i2s: tos_out = itos; break; | |
1332 case Bytecodes::_i2l: // fall through | |
1333 case Bytecodes::_f2l: // fall through | |
1334 case Bytecodes::_d2l: tos_out = ltos; break; | |
1335 case Bytecodes::_i2f: // fall through | |
1336 case Bytecodes::_l2f: // fall through | |
1337 case Bytecodes::_d2f: tos_out = ftos; break; | |
1338 case Bytecodes::_i2d: // fall through | |
1339 case Bytecodes::_l2d: // fall through | |
1340 case Bytecodes::_f2d: tos_out = dtos; break; | |
1341 default : ShouldNotReachHere(); | |
1342 } | |
1343 transition(tos_in, tos_out); | |
1344 } | |
1345 #endif // ASSERT | |
1346 | |
1347 static const int64_t is_nan = 0x8000000000000000L; | |
1348 | |
1349 // Conversion | |
1350 switch (bytecode()) { | |
1351 case Bytecodes::_i2l: | |
1352 __ movslq(rax, rax); | |
1353 break; | |
1354 case Bytecodes::_i2f: | |
1355 __ cvtsi2ssl(xmm0, rax); | |
1356 break; | |
1357 case Bytecodes::_i2d: | |
1358 __ cvtsi2sdl(xmm0, rax); | |
1359 break; | |
1360 case Bytecodes::_i2b: | |
1361 __ movsbl(rax, rax); | |
1362 break; | |
1363 case Bytecodes::_i2c: | |
1364 __ movzwl(rax, rax); | |
1365 break; | |
1366 case Bytecodes::_i2s: | |
1367 __ movswl(rax, rax); | |
1368 break; | |
1369 case Bytecodes::_l2i: | |
1370 __ movl(rax, rax); | |
1371 break; | |
1372 case Bytecodes::_l2f: | |
1373 __ cvtsi2ssq(xmm0, rax); | |
1374 break; | |
1375 case Bytecodes::_l2d: | |
1376 __ cvtsi2sdq(xmm0, rax); | |
1377 break; | |
1378 case Bytecodes::_f2i: | |
1379 { | |
1380 Label L; | |
1381 __ cvttss2sil(rax, xmm0); | |
1382 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? | |
1383 __ jcc(Assembler::notEqual, L); | |
1384 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); | |
1385 __ bind(L); | |
1386 } | |
1387 break; | |
1388 case Bytecodes::_f2l: | |
1389 { | |
1390 Label L; | |
1391 __ cvttss2siq(rax, xmm0); | |
1392 // NaN or overflow/underflow? | |
1393 __ cmp64(rax, ExternalAddress((address) &is_nan)); | |
1394 __ jcc(Assembler::notEqual, L); | |
1395 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); | |
1396 __ bind(L); | |
1397 } | |
1398 break; | |
1399 case Bytecodes::_f2d: | |
1400 __ cvtss2sd(xmm0, xmm0); | |
1401 break; | |
1402 case Bytecodes::_d2i: | |
1403 { | |
1404 Label L; | |
1405 __ cvttsd2sil(rax, xmm0); | |
1406 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? | |
1407 __ jcc(Assembler::notEqual, L); | |
1408 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1); | |
1409 __ bind(L); | |
1410 } | |
1411 break; | |
1412 case Bytecodes::_d2l: | |
1413 { | |
1414 Label L; | |
1415 __ cvttsd2siq(rax, xmm0); | |
1416 // NaN or overflow/underflow? | |
1417 __ cmp64(rax, ExternalAddress((address) &is_nan)); | |
1418 __ jcc(Assembler::notEqual, L); | |
1419 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1); | |
1420 __ bind(L); | |
1421 } | |
1422 break; | |
1423 case Bytecodes::_d2f: | |
1424 __ cvtsd2ss(xmm0, xmm0); | |
1425 break; | |
1426 default: | |
1427 ShouldNotReachHere(); | |
1428 } | |
1429 } | |
1430 | |
1431 void TemplateTable::lcmp() { | |
1432 transition(ltos, itos); | |
1433 Label done; | |
1434 __ pop_l(rdx); | |
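  // Produce the -1/0/+1 result without a branch on the non-less paths:
  // rax is preset to -1 and kept if rdx < rax; otherwise setb(notEqual)
  // leaves 1 for "greater" (the only remaining unequal case) and 0 for
  // "equal", zero-extended into eax by movzbl.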
1435 __ cmpq(rdx, rax); | |
1436 __ movl(rax, -1); | |
1437 __ jccb(Assembler::less, done); | |
1438 __ setb(Assembler::notEqual, rax); | |
1439 __ movzbl(rax, rax); | |
1440 __ bind(done); | |
1441 } | |
1442 | |
1443 void TemplateTable::float_cmp(bool is_float, int unordered_result) { | |
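  // unordered_result encodes the NaN behaviour of the bytecode being
  // compiled: -1 for fcmpl/dcmpl (unordered compares as "less") and +1 for
  // fcmpg/dcmpg (unordered compares as "greater"); the parity flag set by
  // ucomiss/ucomisd signals the unordered case.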
1444 Label done; | |
1445 if (is_float) { | |
1446 // XXX get rid of pop here, use ... reg, mem32 | |
1447 __ pop_f(xmm1); | |
1448 __ ucomiss(xmm1, xmm0); | |
1449 } else { | |
1450 // XXX get rid of pop here, use ... reg, mem64 | |
1451 __ pop_d(xmm1); | |
1452 __ ucomisd(xmm1, xmm0); | |
1453 } | |
1454 if (unordered_result < 0) { | |
1455 __ movl(rax, -1); | |
1456 __ jccb(Assembler::parity, done); | |
1457 __ jccb(Assembler::below, done); | |
1458 __ setb(Assembler::notEqual, rdx); | |
1459 __ movzbl(rax, rdx); | |
1460 } else { | |
1461 __ movl(rax, 1); | |
1462 __ jccb(Assembler::parity, done); | |
1463 __ jccb(Assembler::above, done); | |
1464 __ movl(rax, 0); | |
1465 __ jccb(Assembler::equal, done); | |
1466 __ decrementl(rax); | |
1467 } | |
1468 __ bind(done); | |
1469 } | |
1470 | |
1471 void TemplateTable::branch(bool is_jsr, bool is_wide) { | |
1472 __ get_method(rcx); // rcx holds method | |
1473 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx | |
1474 // holds bumped taken count | |
1475 | |
1476 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + | |
1477 InvocationCounter::counter_offset(); | |
1478 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + | |
1479 InvocationCounter::counter_offset(); | |
1480 const int method_offset = frame::interpreter_frame_method_offset * wordSize; | |
1481 | |
1482 // Load up edx with the branch displacement | |
1483 __ movl(rdx, at_bcp(1)); | |
1484 __ bswapl(rdx); | |
1485 | |
1486 if (!is_wide) { | |
1487 __ sarl(rdx, 16); | |
1488 } | |
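  // For 16-bit branch offsets the two operand bytes sit in the upper half of
  // rdx after bswapl, so the arithmetic shift both positions and
  // sign-extends them; wide branches (goto_w/jsr_w) carry a full 32-bit
  // big-endian offset, which bswapl alone puts in place.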
1489 __ movslq(rdx, rdx); | |
1490 | |
1491 // Handle all the JSR stuff here, then exit. | |
1492 // It's much shorter and cleaner than intermingling with the non-JSR | |
1493 // normal-branch stuff occurring below. | |
1494 if (is_jsr) { | |
1495 // Pre-load the next target bytecode into rbx | |
1496 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0)); | |
1497 | |
1498 // compute return address as bci in rax | |
1499 __ leaq(rax, at_bcp((is_wide ? 5 : 3) - | |
1500 in_bytes(constMethodOopDesc::codes_offset()))); | |
1501 __ subq(rax, Address(rcx, methodOopDesc::const_offset())); | |
1502 // Adjust the bcp in r13 by the displacement in rdx | |
1503 __ addq(r13, rdx); | |
1504 // jsr returns atos that is not an oop | |
1505 __ push_i(rax); | |
1506 __ dispatch_only(vtos); | |
1507 return; | |
1508 } | |
1509 | |
1510 // Normal (non-jsr) branch handling | |
1511 | |
1512 // Adjust the bcp in r13 by the displacement in rdx | |
1513 __ addq(r13, rdx); | |
1514 | |
1515 assert(UseLoopCounter || !UseOnStackReplacement, | |
1516 "on-stack-replacement requires loop counters"); | |
1517 Label backedge_counter_overflow; | |
1518 Label profile_method; | |
1519 Label dispatch; | |
1520 if (UseLoopCounter) { | |
1521 // increment backedge counter for backward branches | |
1522 // rax: MDO | |
1523 // ebx: MDO bumped taken-count | |
1524 // rcx: method | |
1525 // rdx: target offset | |
1526 // r13: target bcp | |
1527 // r14: locals pointer | |
1528 __ testl(rdx, rdx); // check if forward or backward branch | |
1529 __ jcc(Assembler::positive, dispatch); // count only if backward branch | |
1530 | |
1531 // increment counter | |
1532 __ movl(rax, Address(rcx, be_offset)); // load backedge counter | |
1533 __ incrementl(rax, InvocationCounter::count_increment); // increment | |
1534 // counter | |
1535 __ movl(Address(rcx, be_offset), rax); // store counter | |
1536 | |
1537 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter | |
1538 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits | |
1539 __ addl(rax, Address(rcx, be_offset)); // add both counters | |
1540 | |
1541 if (ProfileInterpreter) { | |
1542 // Test to see if we should create a method data oop | |
1543 __ cmp32(rax, | |
1544 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit)); | |
1545 __ jcc(Assembler::less, dispatch); | |
1546 | |
1547 // if no method data exists, go to profile method | |
1548 __ test_method_data_pointer(rax, profile_method); | |
1549 | |
1550 if (UseOnStackReplacement) { | |
1551 // check for overflow against ebx which is the MDO taken count | |
1552 __ cmp32(rbx, | |
1553 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); | |
1554 __ jcc(Assembler::below, dispatch); | |
1555 | |
1556 // When ProfileInterpreter is on, the backedge_count comes | |
1557 // from the methodDataOop, whose value does not get reset on | |
1558 // the call to frequency_counter_overflow(). To avoid | |
1559 // excessive calls to the overflow routine while the method is | |
1560 // being compiled, add a second test to make sure the overflow | |
1561 // function is called only once every overflow_frequency. | |
1562 const int overflow_frequency = 1024; | |
1563 __ andl(rbx, overflow_frequency - 1); | |
1564 __ jcc(Assembler::zero, backedge_counter_overflow); | |
1565 | |
1566 } | |
1567 } else { | |
1568 if (UseOnStackReplacement) { | |
1569 // check for overflow against eax, which is the sum of the | |
1570 // counters | |
1571 __ cmp32(rax, | |
1572 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); | |
1573 __ jcc(Assembler::aboveEqual, backedge_counter_overflow); | |
1574 | |
1575 } | |
1576 } | |
1577 __ bind(dispatch); | |
1578 } | |
1579 | |
1580 // Pre-load the next target bytecode into rbx | |
1581 __ load_unsigned_byte(rbx, Address(r13, 0)); | |
1582 | |
1583 // continue with the bytecode @ target | |
1584 // eax: return bci for jsr's, unused otherwise | |
1585 // ebx: target bytecode | |
1586 // r13: target bcp | |
1587 __ dispatch_only(vtos); | |
1588 | |
1589 if (UseLoopCounter) { | |
1590 if (ProfileInterpreter) { | |
1591 // Out-of-line code to allocate method data oop. | |
1592 __ bind(profile_method); | |
1593 __ call_VM(noreg, | |
1594 CAST_FROM_FN_PTR(address, | |
1595 InterpreterRuntime::profile_method), r13); | |
1596 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode | |
1597 __ movq(rcx, Address(rbp, method_offset)); | |
1598 __ movq(rcx, Address(rcx, | |
1599 in_bytes(methodOopDesc::method_data_offset()))); | |
1600 __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), | |
1601 rcx); | |
1602 __ test_method_data_pointer(rcx, dispatch); | |
1603 // offset non-null mdp by MDO::data_offset() + IR::profile_method() | |
1604 __ addq(rcx, in_bytes(methodDataOopDesc::data_offset())); | |
1605 __ addq(rcx, rax); | |
1606 __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), | |
1607 rcx); | |
1608 __ jmp(dispatch); | |
1609 } | |
1610 | |
1611 if (UseOnStackReplacement) { | |
1612 // backedge counter overflow | |
1613 __ bind(backedge_counter_overflow); | |
1614 __ negq(rdx); | |
1615 __ addq(rdx, r13); // branch bcp | |
1616 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp) | |
1617 __ call_VM(noreg, | |
1618 CAST_FROM_FN_PTR(address, | |
1619 InterpreterRuntime::frequency_counter_overflow), | |
1620 rdx); | |
1621 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode | |
1622 | |
1623 // rax: osr nmethod (osr ok) or NULL (osr not possible) | |
1624 // ebx: target bytecode | |
1625 // rdx: scratch | |
1626 // r14: locals pointer | |
1627 // r13: bcp | |
1628 __ testq(rax, rax); // test result | |
1629 __ jcc(Assembler::zero, dispatch); // no osr if null | |
1630 // nmethod may have been invalidated (VM may block upon call_VM return) | |
1631 __ movl(rcx, Address(rax, nmethod::entry_bci_offset())); | |
1632 __ cmpl(rcx, InvalidOSREntryBci); | |
1633 __ jcc(Assembler::equal, dispatch); | |
1634 | |
1635 // We have the address of an on-stack-replacement routine in rax. | |
1636 // We need to prepare to execute the OSR method. First we must | |
1637 // migrate the locals and monitors off the stack. | |
1638 | |
1639 __ movq(r13, rax); // save the nmethod | |
1640 | |
1641 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); | |
1642 | |
1643 // rax is the OSR buffer; move it to the expected parameter location | |
1644 __ movq(j_rarg0, rax); | |
1645 | |
1646 // We use the j_rarg definitions here because parameter registers differ | |
1647 // across platforms, and we are in the midst of a calling sequence to the | |
1648 // OSR nmethod where we don't want collisions. These are NOT parameters. | |
1649 | |
1650 const Register retaddr = j_rarg2; | |
1651 const Register sender_sp = j_rarg1; | |
1652 | |
1653 // pop the interpreter frame | |
1654 __ movq(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp | |
1655 __ leave(); // remove frame anchor | |
1656 __ popq(retaddr); // get return address | |
1657 __ movq(rsp, sender_sp); // set sp to sender sp | |
1658 // Ensure compiled code always sees stack at proper alignment | |
1659 __ andq(rsp, -(StackAlignmentInBytes)); | |
1660 | |
1661 // Unlike 32-bit x86, we need no specialized return from compiled code | |
1662 // to the interpreter or the call stub. | |
1663 | |
1664 // push the return address | |
1665 __ pushq(retaddr); | |
1666 | |
1667 // and begin the OSR nmethod | |
1668 __ jmp(Address(r13, nmethod::osr_entry_point_offset())); | |
1669 } | |
1670 } | |
1671 } | |
1672 | |
1673 | |
1674 void TemplateTable::if_0cmp(Condition cc) { | |
1675 transition(itos, vtos); | |
1676 // assume branch is more often taken than not (loops use backward branches) | |
1677 Label not_taken; | |
1678 __ testl(rax, rax); | |
1679 __ jcc(j_not(cc), not_taken); | |
1680 branch(false, false); | |
1681 __ bind(not_taken); | |
1682 __ profile_not_taken_branch(rax); | |
1683 } | |
1684 | |
1685 void TemplateTable::if_icmp(Condition cc) { | |
1686 transition(itos, vtos); | |
1687 // assume branch is more often taken than not (loops use backward branches) | |
1688 Label not_taken; | |
1689 __ pop_i(rdx); | |
1690 __ cmpl(rdx, rax); | |
1691 __ jcc(j_not(cc), not_taken); | |
1692 branch(false, false); | |
1693 __ bind(not_taken); | |
1694 __ profile_not_taken_branch(rax); | |
1695 } | |
1696 | |
1697 void TemplateTable::if_nullcmp(Condition cc) { | |
1698 transition(atos, vtos); | |
1699 // assume branch is more often taken than not (loops use backward branches) | |
1700 Label not_taken; | |
1701 __ testq(rax, rax); | |
1702 __ jcc(j_not(cc), not_taken); | |
1703 branch(false, false); | |
1704 __ bind(not_taken); | |
1705 __ profile_not_taken_branch(rax); | |
1706 } | |
1707 | |
1708 void TemplateTable::if_acmp(Condition cc) { | |
1709 transition(atos, vtos); | |
1710 // assume branch is more often taken than not (loops use backward branches) | |
1711 Label not_taken; | |
1712 __ pop_ptr(rdx); | |
1713 __ cmpq(rdx, rax); | |
1714 __ jcc(j_not(cc), not_taken); | |
1715 branch(false, false); | |
1716 __ bind(not_taken); | |
1717 __ profile_not_taken_branch(rax); | |
1718 } | |
1719 | |
1720 void TemplateTable::ret() { | |
1721 transition(vtos, vtos); | |
1722 locals_index(rbx); | |
1723 __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp | |
1724 __ profile_ret(rbx, rcx); | |
1725 __ get_method(rax); | |
1726 __ movq(r13, Address(rax, methodOopDesc::const_offset())); | |
1727 __ leaq(r13, Address(r13, rbx, Address::times_1, | |
1728 constMethodOopDesc::codes_offset())); | |
1729 __ dispatch_next(vtos); | |
1730 } | |
1731 | |
1732 void TemplateTable::wide_ret() { | |
1733 transition(vtos, vtos); | |
1734 locals_index_wide(rbx); | |
1735 __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp | |
1736 __ profile_ret(rbx, rcx); | |
1737 __ get_method(rax); | |
1738 __ movq(r13, Address(rax, methodOopDesc::const_offset())); | |
1739 __ leaq(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset())); | |
1740 __ dispatch_next(vtos); | |
1741 } | |
1742 | |
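// For reference, the tableswitch operands walked below are laid out roughly
// as follows in the bytecode stream (all 4-byte values are big-endian, hence
// the bswapl calls):
//
//   tableswitch opcode
//   0-3 padding bytes to a 4-byte boundary       <- rbx after the andq below
//   default offset   (4 bytes)                   Address(rbx, 0)
//   low              (4 bytes)                   Address(rbx, BytesPerInt)
//   high             (4 bytes)                   Address(rbx, 2 * BytesPerInt)
//   (high - low + 1) jump offsets of 4 bytes each, starting at 3 * BytesPerInt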
1743 void TemplateTable::tableswitch() { | |
1744 Label default_case, continue_execution; | |
1745 transition(itos, vtos); | |
1746 // align r13 | |
1747 __ leaq(rbx, at_bcp(BytesPerInt)); | |
1748 __ andq(rbx, -BytesPerInt); | |
1749 // load lo & hi | |
1750 __ movl(rcx, Address(rbx, BytesPerInt)); | |
1751 __ movl(rdx, Address(rbx, 2 * BytesPerInt)); | |
1752 __ bswapl(rcx); | |
1753 __ bswapl(rdx); | |
1754 // check against lo & hi | |
1755 __ cmpl(rax, rcx); | |
1756 __ jcc(Assembler::less, default_case); | |
1757 __ cmpl(rax, rdx); | |
1758 __ jcc(Assembler::greater, default_case); | |
1759 // lookup dispatch offset | |
1760 __ subl(rax, rcx); | |
1761 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt)); | |
1762 __ profile_switch_case(rax, rbx, rcx); | |
1763 // continue execution | |
1764 __ bind(continue_execution); | |
1765 __ bswapl(rdx); | |
1766 __ movslq(rdx, rdx); | |
1767 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); | |
1768 __ addq(r13, rdx); | |
1769 __ dispatch_only(vtos); | |
1770 // handle default | |
1771 __ bind(default_case); | |
1772 __ profile_switch_default(rax); | |
1773 __ movl(rdx, Address(rbx, 0)); | |
1774 __ jmp(continue_execution); | |
1775 } | |
1776 | |
1777 void TemplateTable::lookupswitch() { | |
1778 transition(itos, itos); | |
1779 __ stop("lookupswitch bytecode should have been rewritten"); | |
1780 } | |
1781 | |
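// For reference, the lookupswitch operands scanned below are laid out roughly
// as follows (again big-endian 4-byte values):
//
//   lookupswitch opcode
//   0-3 padding bytes to a 4-byte boundary       <- rbx after the andq below
//   default offset   (4 bytes)                   Address(rbx, 0)
//   npairs           (4 bytes)                   Address(rbx, BytesPerInt)
//   npairs (match, offset) pairs of 4 bytes each, which is why the entries
//   below are addressed with an 8-byte scale (Address::times_8)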
1782 void TemplateTable::fast_linearswitch() { | |
1783 transition(itos, vtos); | |
1784 Label loop_entry, loop, found, continue_execution; | |
1785 // bswap rax so we can avoid bswapping the table entries | |
1786 __ bswapl(rax); | |
1787 // align r13 | |
1788 __ leaq(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of | |
1789 // this instruction (change offsets | |
1790 // below) | |
1791 __ andq(rbx, -BytesPerInt); | |
1792 // set counter | |
1793 __ movl(rcx, Address(rbx, BytesPerInt)); | |
1794 __ bswapl(rcx); | |
1795 __ jmpb(loop_entry); | |
1796 // table search | |
1797 __ bind(loop); | |
1798 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt)); | |
1799 __ jcc(Assembler::equal, found); | |
1800 __ bind(loop_entry); | |
1801 __ decrementl(rcx); | |
1802 __ jcc(Assembler::greaterEqual, loop); | |
1803 // default case | |
1804 __ profile_switch_default(rax); | |
1805 __ movl(rdx, Address(rbx, 0)); | |
1806 __ jmp(continue_execution); | |
1807 // entry found -> get offset | |
1808 __ bind(found); | |
1809 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt)); | |
1810 __ profile_switch_case(rcx, rax, rbx); | |
1811 // continue execution | |
1812 __ bind(continue_execution); | |
1813 __ bswapl(rdx); | |
1814 __ movslq(rdx, rdx); | |
1815 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); | |
1816 __ addq(r13, rdx); | |
1817 __ dispatch_only(vtos); | |
1818 } | |
1819 | |
1820 void TemplateTable::fast_binaryswitch() { | |
1821 transition(itos, vtos); | |
1822 // Implementation using the following core algorithm: | |
1823 // | |
1824 // int binary_search(int key, LookupswitchPair* array, int n) { | |
1825 // // Binary search according to "Methodik des Programmierens" by | |
1826 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. | |
1827 // int i = 0; | |
1828 // int j = n; | |
1829 // while (i+1 < j) { | |
1830 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) | |
1831 // // with Q: for all i: 0 <= i < n: key < a[i] | |
1832 // // where a stands for the array and assuming that the (nonexistent) | |
1833 // // element a[n] is infinitely big. | |
1834 // int h = (i + j) >> 1; | |
1835 // // i < h < j | |
1836 // if (key < array[h].fast_match()) { | |
1837 // j = h; | |
1838 // } else { | |
1839 // i = h; | |
1840 // } | |
1841 // } | |
1842 // // R: a[i] <= key < a[i+1] or Q | |
1843 // // (i.e., if key is within array, i is the correct index) | |
1844 // return i; | |
1845 // } | |
1846 | |
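// Note on the addressing below: each LookupswitchPair is a 4-byte match
// followed by a 4-byte offset (hence Address::times_8). 'array' is pointed
// at the first pair, so npairs ends up at array[-BytesPerInt] and the
// default offset at array[-2 * BytesPerInt].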
1847 // Register allocation | |
1848 const Register key = rax; // already set (tosca) | |
1849 const Register array = rbx; | |
1850 const Register i = rcx; | |
1851 const Register j = rdx; | |
1852 const Register h = rdi; | |
1853 const Register temp = rsi; | |
1854 | |
1855 // Find array start | |
1856 __ leaq(array, at_bcp(3 * BytesPerInt)); // btw: should be able to | |
1857 // get rid of this | |
1858 // instruction (change | |
1859 // offsets below) | |
1860 __ andq(array, -BytesPerInt); | |
1861 | |
1862 // Initialize i & j | |
1863 __ xorl(i, i); // i = 0; | |
1864 __ movl(j, Address(array, -BytesPerInt)); // j = length(array); | |
1865 | |
1866 // Convert j into native byte ordering | |
1867 __ bswapl(j); | |
1868 | |
1869 // And start | |
1870 Label entry; | |
1871 __ jmp(entry); | |
1872 | |
1873 // binary search loop | |
1874 { | |
1875 Label loop; | |
1876 __ bind(loop); | |
1877 // int h = (i + j) >> 1; | |
1878 __ leal(h, Address(i, j, Address::times_1)); // h = i + j; | |
1879 __ sarl(h, 1); // h = (i + j) >> 1; | |
1880 // if (key < array[h].fast_match()) { | |
1881 // j = h; | |
1882 // } else { | |
1883 // i = h; | |
1884 // } | |
1885 // Convert array[h].match to native byte-ordering before compare | |
1886 __ movl(temp, Address(array, h, Address::times_8)); | |
1887 __ bswapl(temp); | |
1888 __ cmpl(key, temp); | |
1889 // j = h if (key < array[h].fast_match()) | |
1890 __ cmovl(Assembler::less, j, h); | |
1891 // i = h if (key >= array[h].fast_match()) | |
1892 __ cmovl(Assembler::greaterEqual, i, h); | |
1893 // while (i+1 < j) | |
1894 __ bind(entry); | |
1895 __ leal(h, Address(i, 1)); // i+1 | |
1896 __ cmpl(h, j); // i+1 < j | |
1897 __ jcc(Assembler::less, loop); | |
1898 } | |
1899 | |
1900 // end of binary search, result index is i (must check again!) | |
1901 Label default_case; | |
1902 // Convert array[i].match to native byte-ordering before compare | |
1903 __ movl(temp, Address(array, i, Address::times_8)); | |
1904 __ bswapl(temp); | |
1905 __ cmpl(key, temp); | |
1906 __ jcc(Assembler::notEqual, default_case); | |
1907 | |
1908 // entry found -> j = offset | |
1909 __ movl(j , Address(array, i, Address::times_8, BytesPerInt)); | |
1910 __ profile_switch_case(i, key, array); | |
1911 __ bswapl(j); | |
1912 __ movslq(j, j); | |
1913 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1)); | |
1914 __ addq(r13, j); | |
1915 __ dispatch_only(vtos); | |
1916 | |
1917 // default case -> j = default offset | |
1918 __ bind(default_case); | |
1919 __ profile_switch_default(i); | |
1920 __ movl(j, Address(array, -2 * BytesPerInt)); | |
1921 __ bswapl(j); | |
1922 __ movslq(j, j); | |
1923 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1)); | |
1924 __ addq(r13, j); | |
1925 __ dispatch_only(vtos); | |
1926 } | |
1927 | |
1928 | |
1929 void TemplateTable::_return(TosState state) { | |
1930 transition(state, state); | |
1931 assert(_desc->calls_vm(), | |
1932 "inconsistent calls_vm information"); // call in remove_activation | |
1933 | |
1934 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { | |
1935 assert(state == vtos, "only valid state"); | |
1936 __ movq(c_rarg1, aaddress(0)); | |
1937 __ movq(rdi, Address(c_rarg1, oopDesc::klass_offset_in_bytes())); | |
1938 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); | |
1939 __ testl(rdi, JVM_ACC_HAS_FINALIZER); | |
1940 Label skip_register_finalizer; | |
1941 __ jcc(Assembler::zero, skip_register_finalizer); | |
1942 | |
1943 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1); | |
1944 | |
1945 __ bind(skip_register_finalizer); | |
1946 } | |
1947 | |
1948 __ remove_activation(state, r13); | |
1949 __ jmp(r13); | |
1950 } | |
1951 | |
1952 // ---------------------------------------------------------------------------- | |
1953 // Volatile variables demand their effects be made known to all CPUs | |
1954 // in order. Store buffers on most chips allow reads & writes to | |
1955 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode | |
1956 // without some kind of memory barrier (i.e., it's not sufficient that | |
1957 // the interpreter does not reorder volatile references, the hardware | |
1958 // also must not reorder them). | |
1959 // | |
1960 // According to the new Java Memory Model (JMM): | |
1961 // (1) All volatiles are serialized with respect to each other. ALSO reads & | |
1962 // writes act as acquire & release, so: | |
1963 // (2) A read cannot let unrelated NON-volatile memory refs that | |
1964 // happen after the read float up to before the read. It's OK for | |
1965 // non-volatile memory refs that happen before the volatile read to | |
1966 // float down below it. | |
1967 // (3) Similarly, a volatile write cannot let unrelated NON-volatile | |
1968 // memory refs that happen BEFORE the write float down to after the | |
1969 // write. It's OK for non-volatile memory refs that happen after the | |
1970 // volatile write to float up before it. | |
1971 // | |
1972 // We only put in barriers around volatile refs (they are expensive), | |
1973 // not _between_ memory refs (that would require us to track the | |
1974 // flavor of the previous memory refs). Requirements (2) and (3) | |
1975 // require some barriers before volatile stores and after volatile | |
1976 // loads. These nearly cover requirement (1) but miss the | |
1977 // volatile-store-volatile-load case. This final case is placed after | |
1978 // volatile-stores although it could just as well go before | |
1979 // volatile-loads. | |
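// A minimal sketch of what this means for the field bytecodes below (the
// lines are illustrative pseudocode, not generated code); note that only the
// store side actually emits a barrier in this file, the load-side barriers
// are currently commented out:
//
//   volatile putfield:  store field
//                       membar(StoreLoad | StoreStore)
//   volatile getfield:  load field
//                       (membar(LoadLoad | LoadStore) would go here)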
1980 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits | |
1981 order_constraint) { | |
1982 // Helper function to insert an is-volatile test and memory barrier | |
1983 if (os::is_MP()) { // Not needed on single CPU | |
1984 __ membar(order_constraint); | |
1985 } | |
1986 } | |
1987 | |
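// The resolution check below relies on the layout of the cp cache entry's
// indices word, as implied by shift_count and the 0xFF mask: the low 16 bits
// hold the constant pool index, bits 16..23 hold the bytecode recorded for
// byte_no == 1, and bits 24..31 the one for byte_no == 2; a zero byte means
// the entry has not yet been resolved for that bytecode.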
1988 void TemplateTable::resolve_cache_and_index(int byte_no, | |
1989 Register Rcache, | |
1990 Register index) { | |
1991 assert(byte_no == 1 || byte_no == 2, "byte_no out of range"); | |
1992 | |
1993 const Register temp = rbx; | |
1994 assert_different_registers(Rcache, index, temp); | |
1995 | |
1996 const int shift_count = (1 + byte_no) * BitsPerByte; | |
1997 Label resolved; | |
1998 __ get_cache_and_index_at_bcp(Rcache, index, 1); | |
1999 __ movl(temp, Address(Rcache, | |
2000 index, Address::times_8, | |
2001 constantPoolCacheOopDesc::base_offset() + | |
2002 ConstantPoolCacheEntry::indices_offset())); | |
2003 __ shrl(temp, shift_count); | |
2004 // have we resolved this bytecode? | |
2005 __ andl(temp, 0xFF); | |
2006 __ cmpl(temp, (int) bytecode()); | |
2007 __ jcc(Assembler::equal, resolved); | |
2008 | |
2009 // resolve first time through | |
2010 address entry; | |
2011 switch (bytecode()) { | |
2012 case Bytecodes::_getstatic: | |
2013 case Bytecodes::_putstatic: | |
2014 case Bytecodes::_getfield: | |
2015 case Bytecodes::_putfield: | |
2016 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); | |
2017 break; | |
2018 case Bytecodes::_invokevirtual: | |
2019 case Bytecodes::_invokespecial: | |
2020 case Bytecodes::_invokestatic: | |
2021 case Bytecodes::_invokeinterface: | |
2022 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); | |
2023 break; | |
2024 default: | |
2025 ShouldNotReachHere(); | |
2026 break; | |
2027 } | |
2028 __ movl(temp, (int) bytecode()); | |
2029 __ call_VM(noreg, entry, temp); | |
2030 | |
2031 // Update registers with resolved info | |
2032 __ get_cache_and_index_at_bcp(Rcache, index, 1); | |
2033 __ bind(resolved); | |
2034 } | |
2035 | |
2036 // The Rcache and index registers must be set before the call | |
2037 void TemplateTable::load_field_cp_cache_entry(Register obj, | |
2038 Register cache, | |
2039 Register index, | |
2040 Register off, | |
2041 Register flags, | |
2042 bool is_static = false) { | |
2043 assert_different_registers(cache, index, flags, off); | |
2044 | |
2045 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); | |
2046 // Field offset | |
2047 __ movq(off, Address(cache, index, Address::times_8, | |
2048 in_bytes(cp_base_offset + | |
2049 ConstantPoolCacheEntry::f2_offset()))); | |
2050 // Flags | |
2051 __ movl(flags, Address(cache, index, Address::times_8, | |
2052 in_bytes(cp_base_offset + | |
2053 ConstantPoolCacheEntry::flags_offset()))); | |
2054 | |
2055 // for static fields, overwrite the obj register with the holder klass | |
2056 if (is_static) { | |
2057 __ movq(obj, Address(cache, index, Address::times_8, | |
2058 in_bytes(cp_base_offset + | |
2059 ConstantPoolCacheEntry::f1_offset()))); | |
2060 } | |
2061 } | |
2062 | |
2063 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, | |
2064 Register method, | |
2065 Register itable_index, | |
2066 Register flags, | |
2067 bool is_invokevirtual, | |
2068 bool is_invokevfinal /*unused*/) { | |
2069 // setup registers | |
2070 const Register cache = rcx; | |
2071 const Register index = rdx; | |
2072 assert_different_registers(method, flags); | |
2073 assert_different_registers(method, cache, index); | |
2074 assert_different_registers(itable_index, flags); | |
2075 assert_different_registers(itable_index, cache, index); | |
2076 // determine constant pool cache field offsets | |
2077 const int method_offset = in_bytes( | |
2078 constantPoolCacheOopDesc::base_offset() + | |
2079 (is_invokevirtual | |
2080 ? ConstantPoolCacheEntry::f2_offset() | |
2081 : ConstantPoolCacheEntry::f1_offset())); | |
2082 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2083 ConstantPoolCacheEntry::flags_offset()); | |
2084 // access constant pool cache fields | |
2085 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2086 ConstantPoolCacheEntry::f2_offset()); | |
2087 | |
2088 resolve_cache_and_index(byte_no, cache, index); | |
2089 | |
2090 assert(wordSize == 8, "adjust code below"); | |
2091 __ movq(method, Address(cache, index, Address::times_8, method_offset)); | |
2092 if (itable_index != noreg) { | |
2093 __ movq(itable_index, | |
2094 Address(cache, index, Address::times_8, index_offset)); | |
2095 } | |
2096 __ movl(flags , Address(cache, index, Address::times_8, flags_offset)); | |
2097 } | |
2098 | |
2099 | |
2100 // The cache and index registers are expected to be set before the call. | |
2101 // Correct values of the cache and index registers are preserved. | |
2102 void TemplateTable::jvmti_post_field_access(Register cache, Register index, | |
2103 bool is_static, bool has_tos) { | |
2104 // do the JVMTI work here to avoid disturbing the register state below | |
2105 // We use the c_rarg registers here because we want to use the registers used in | |
2106 // the call to the VM. | |
2107 if (JvmtiExport::can_post_field_access()) { | |
2108 // Check to see if a field access watch has been set before we | |
2109 // take the time to call into the VM. | |
2110 Label L1; | |
2111 assert_different_registers(cache, index, rax); | |
2112 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); | |
2113 __ testl(rax, rax); | |
2114 __ jcc(Assembler::zero, L1); | |
2115 | |
2116 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1); | |
2117 | |
2118 // cache entry pointer | |
2119 __ addq(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset())); | |
2120 __ shll(c_rarg3, LogBytesPerWord); | |
2121 __ addq(c_rarg2, c_rarg3); | |
2122 if (is_static) { | |
2123 __ xorl(c_rarg1, c_rarg1); // NULL object reference | |
2124 } else { | |
2125 __ movq(c_rarg1, at_tos()); // get object pointer without popping it | |
2126 __ verify_oop(c_rarg1); | |
2127 } | |
2128 // c_rarg1: object pointer or NULL | |
2129 // c_rarg2: cache entry pointer | |
2130 // c_rarg3: jvalue object on the stack | |
2131 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
2132 InterpreterRuntime::post_field_access), | |
2133 c_rarg1, c_rarg2, c_rarg3); | |
2134 __ get_cache_and_index_at_bcp(cache, index, 1); | |
2135 __ bind(L1); | |
2136 } | |
2137 } | |
2138 | |
2139 void TemplateTable::pop_and_check_object(Register r) { | |
2140 __ pop_ptr(r); | |
2141 __ null_check(r); // for field access must check obj. | |
2142 __ verify_oop(r); | |
2143 } | |
2144 | |
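// Sketch of how the cp cache entry is consumed below (names follow
// ConstantPoolCacheEntry): f2 holds the field offset, f1 the holder klass
// (only loaded for static fields), and flags packs, among other things, the
// field type in its top four bits (extracted via the tosBits shift and the
// 0x0F mask) and the volatile bit tested in putfield_or_static.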
2145 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { | |
2146 transition(vtos, vtos); | |
2147 | |
2148 const Register cache = rcx; | |
2149 const Register index = rdx; | |
2150 const Register obj = c_rarg3; | |
2151 const Register off = rbx; | |
2152 const Register flags = rax; | |
2153 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them | |
2154 | |
2155 resolve_cache_and_index(byte_no, cache, index); | |
2156 jvmti_post_field_access(cache, index, is_static, false); | |
2157 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); | |
2158 | |
2159 if (!is_static) { | |
2160 // obj is on the stack | |
2161 pop_and_check_object(obj); | |
2162 } | |
2163 | |
2164 const Address field(obj, off, Address::times_1); | |
2165 | |
2166 Label Done, notByte, notInt, notShort, notChar, | |
2167 notLong, notFloat, notObj, notDouble; | |
2168 | |
2169 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2170 assert(btos == 0, "change code, btos != 0"); | |
2171 | |
2172 __ andl(flags, 0x0F); | |
2173 __ jcc(Assembler::notZero, notByte); | |
2174 // btos | |
2175 __ load_signed_byte(rax, field); | |
2176 __ push(btos); | |
2177 // Rewrite bytecode to be faster | |
2178 if (!is_static) { | |
2179 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx); | |
2180 } | |
2181 __ jmp(Done); | |
2182 | |
2183 __ bind(notByte); | |
2184 __ cmpl(flags, atos); | |
2185 __ jcc(Assembler::notEqual, notObj); | |
2186 // atos | |
2187 __ movq(rax, field); | |
2188 __ push(atos); | |
2189 if (!is_static) { | |
2190 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); | |
2191 } | |
2192 __ jmp(Done); | |
2193 | |
2194 __ bind(notObj); | |
2195 __ cmpl(flags, itos); | |
2196 __ jcc(Assembler::notEqual, notInt); | |
2197 // itos | |
2198 __ movl(rax, field); | |
2199 __ push(itos); | |
2200 // Rewrite bytecode to be faster | |
2201 if (!is_static) { | |
2202 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx); | |
2203 } | |
2204 __ jmp(Done); | |
2205 | |
2206 __ bind(notInt); | |
2207 __ cmpl(flags, ctos); | |
2208 __ jcc(Assembler::notEqual, notChar); | |
2209 // ctos | |
2210 __ load_unsigned_word(rax, field); | |
2211 __ push(ctos); | |
2212 // Rewrite bytecode to be faster | |
2213 if (!is_static) { | |
2214 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx); | |
2215 } | |
2216 __ jmp(Done); | |
2217 | |
2218 __ bind(notChar); | |
2219 __ cmpl(flags, stos); | |
2220 __ jcc(Assembler::notEqual, notShort); | |
2221 // stos | |
2222 __ load_signed_word(rax, field); | |
2223 __ push(stos); | |
2224 // Rewrite bytecode to be faster | |
2225 if (!is_static) { | |
2226 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx); | |
2227 } | |
2228 __ jmp(Done); | |
2229 | |
2230 __ bind(notShort); | |
2231 __ cmpl(flags, ltos); | |
2232 __ jcc(Assembler::notEqual, notLong); | |
2233 // ltos | |
2234 __ movq(rax, field); | |
2235 __ push(ltos); | |
2236 // Rewrite bytecode to be faster | |
2237 if (!is_static) { | |
2238 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx); | |
2239 } | |
2240 __ jmp(Done); | |
2241 | |
2242 __ bind(notLong); | |
2243 __ cmpl(flags, ftos); | |
2244 __ jcc(Assembler::notEqual, notFloat); | |
2245 // ftos | |
2246 __ movflt(xmm0, field); | |
2247 __ push(ftos); | |
2248 // Rewrite bytecode to be faster | |
2249 if (!is_static) { | |
2250 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx); | |
2251 } | |
2252 __ jmp(Done); | |
2253 | |
2254 __ bind(notFloat); | |
2255 #ifdef ASSERT | |
2256 __ cmpl(flags, dtos); | |
2257 __ jcc(Assembler::notEqual, notDouble); | |
2258 #endif | |
2259 // dtos | |
2260 __ movdbl(xmm0, field); | |
2261 __ push(dtos); | |
2262 // Rewrite bytecode to be faster | |
2263 if (!is_static) { | |
2264 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx); | |
2265 } | |
2266 #ifdef ASSERT | |
2267 __ jmp(Done); | |
2268 | |
2269 __ bind(notDouble); | |
2270 __ stop("Bad state"); | |
2271 #endif | |
2272 | |
2273 __ bind(Done); | |
2274 // [jk] not needed currently | |
2275 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad | | |
2276 // Assembler::LoadStore)); | |
2277 } | |
2278 | |
2279 | |
2280 void TemplateTable::getfield(int byte_no) { | |
2281 getfield_or_static(byte_no, false); | |
2282 } | |
2283 | |
2284 void TemplateTable::getstatic(int byte_no) { | |
2285 getfield_or_static(byte_no, true); | |
2286 } | |
2287 | |
2288 // The cache and index registers are expected to be set before the call. | |
2289 // The function may destroy various registers, just not the cache and index registers. | |
2290 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { | |
2291 transition(vtos, vtos); | |
2292 | |
2293 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); | |
2294 | |
2295 if (JvmtiExport::can_post_field_modification()) { | |
2296 // Check to see if a field modification watch has been set before | |
2297 // we take the time to call into the VM. | |
2298 Label L1; | |
2299 assert_different_registers(cache, index, rax); | |
2300 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); | |
2301 __ testl(rax, rax); | |
2302 __ jcc(Assembler::zero, L1); | |
2303 | |
2304 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1); | |
2305 | |
2306 if (is_static) { | |
2307 // Life is simple. Null out the object pointer. | |
2308 __ xorl(c_rarg1, c_rarg1); | |
2309 } else { | |
2310 // Life is harder. The stack holds the value on top, followed by | |
2311 // the object. We don't know the size of the value, though; it | |
2312 // could be one or two words depending on its type. As a result, | |
2313 // we must find the type to determine where the object is. | |
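// Roughly, the expression stack here is (top of stack first):
//   rsp -> [ value: 1 or 2 slots ]
//          [ object reference    ]   <- at_tos_p1() or at_tos_p2()
// so we first assume a one-word value and cmov to at_tos_p2() for the
// two-word ltos/dtos cases.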
2314 __ movl(c_rarg3, Address(c_rarg2, rscratch1, | |
2315 Address::times_8, | |
2316 in_bytes(cp_base_offset + | |
2317 ConstantPoolCacheEntry::flags_offset()))); | |
2318 __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits); | |
2319 // Make sure we don't need to mask rcx for tosBits after the | |
2320 // above shift | |
2321 ConstantPoolCacheEntry::verify_tosBits(); | |
2322 __ movq(c_rarg1, at_tos_p1()); // initially assume a one word jvalue | |
2323 __ cmpl(c_rarg3, ltos); | |
2324 __ cmovq(Assembler::equal, | |
2325 c_rarg1, at_tos_p2()); // ltos (two word jvalue) | |
2326 __ cmpl(c_rarg3, dtos); | |
2327 __ cmovq(Assembler::equal, | |
2328 c_rarg1, at_tos_p2()); // dtos (two word jvalue) | |
2329 } | |
2330 // cache entry pointer | |
2331 __ addq(c_rarg2, in_bytes(cp_base_offset)); | |
2332 __ shll(rscratch1, LogBytesPerWord); | |
2333 __ addq(c_rarg2, rscratch1); | |
2334 // object (tos) | |
2335 __ movq(c_rarg3, rsp); | |
2336 // c_rarg1: object pointer set up above (NULL if static) | |
2337 // c_rarg2: cache entry pointer | |
2338 // c_rarg3: jvalue object on the stack | |
2339 __ call_VM(noreg, | |
2340 CAST_FROM_FN_PTR(address, | |
2341 InterpreterRuntime::post_field_modification), | |
2342 c_rarg1, c_rarg2, c_rarg3); | |
2343 __ get_cache_and_index_at_bcp(cache, index, 1); | |
2344 __ bind(L1); | |
2345 } | |
2346 } | |
2347 | |
2348 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { | |
2349 transition(vtos, vtos); | |
2350 | |
2351 const Register cache = rcx; | |
2352 const Register index = rdx; | |
2353 const Register obj = rcx; | |
2354 const Register off = rbx; | |
2355 const Register flags = rax; | |
2356 const Register bc = c_rarg3; | |
2357 | |
2358 resolve_cache_and_index(byte_no, cache, index); | |
2359 jvmti_post_field_mod(cache, index, is_static); | |
2360 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); | |
2361 | |
2362 // [jk] not needed currently | |
2363 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | | |
2364 // Assembler::StoreStore)); | |
2365 | |
2366 Label notVolatile, Done; | |
2367 __ movl(rdx, flags); | |
2368 __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2369 __ andl(rdx, 0x1); | |
2370 | |
2371 // field address | |
2372 const Address field(obj, off, Address::times_1); | |
2373 | |
2374 Label notByte, notInt, notShort, notChar, | |
2375 notLong, notFloat, notObj, notDouble; | |
2376 | |
2377 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2378 | |
2379 assert(btos == 0, "change code, btos != 0"); | |
2380 __ andl(flags, 0x0f); | |
2381 __ jcc(Assembler::notZero, notByte); | |
2382 // btos | |
2383 __ pop(btos); | |
2384 if (!is_static) pop_and_check_object(obj); | |
2385 __ movb(field, rax); | |
2386 if (!is_static) { | |
2387 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx); | |
2388 } | |
2389 __ jmp(Done); | |
2390 | |
2391 __ bind(notByte); | |
2392 __ cmpl(flags, atos); | |
2393 __ jcc(Assembler::notEqual, notObj); | |
2394 // atos | |
2395 __ pop(atos); | |
2396 if (!is_static) pop_and_check_object(obj); | |
2397 __ movq(field, rax); | |
2398 __ store_check(obj, field); // Need to mark card | |
2399 if (!is_static) { | |
2400 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx); | |
2401 } | |
2402 __ jmp(Done); | |
2403 | |
2404 __ bind(notObj); | |
2405 __ cmpl(flags, itos); | |
2406 __ jcc(Assembler::notEqual, notInt); | |
2407 // itos | |
2408 __ pop(itos); | |
2409 if (!is_static) pop_and_check_object(obj); | |
2410 __ movl(field, rax); | |
2411 if (!is_static) { | |
2412 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx); | |
2413 } | |
2414 __ jmp(Done); | |
2415 | |
2416 __ bind(notInt); | |
2417 __ cmpl(flags, ctos); | |
2418 __ jcc(Assembler::notEqual, notChar); | |
2419 // ctos | |
2420 __ pop(ctos); | |
2421 if (!is_static) pop_and_check_object(obj); | |
2422 __ movw(field, rax); | |
2423 if (!is_static) { | |
2424 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx); | |
2425 } | |
2426 __ jmp(Done); | |
2427 | |
2428 __ bind(notChar); | |
2429 __ cmpl(flags, stos); | |
2430 __ jcc(Assembler::notEqual, notShort); | |
2431 // stos | |
2432 __ pop(stos); | |
2433 if (!is_static) pop_and_check_object(obj); | |
2434 __ movw(field, rax); | |
2435 if (!is_static) { | |
2436 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx); | |
2437 } | |
2438 __ jmp(Done); | |
2439 | |
2440 __ bind(notShort); | |
2441 __ cmpl(flags, ltos); | |
2442 __ jcc(Assembler::notEqual, notLong); | |
2443 // ltos | |
2444 __ pop(ltos); | |
2445 if (!is_static) pop_and_check_object(obj); | |
2446 __ movq(field, rax); | |
2447 if (!is_static) { | |
2448 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx); | |
2449 } | |
2450 __ jmp(Done); | |
2451 | |
2452 __ bind(notLong); | |
2453 __ cmpl(flags, ftos); | |
2454 __ jcc(Assembler::notEqual, notFloat); | |
2455 // ftos | |
2456 __ pop(ftos); | |
2457 if (!is_static) pop_and_check_object(obj); | |
2458 __ movflt(field, xmm0); | |
2459 if (!is_static) { | |
2460 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx); | |
2461 } | |
2462 __ jmp(Done); | |
2463 | |
2464 __ bind(notFloat); | |
2465 #ifdef ASSERT | |
2466 __ cmpl(flags, dtos); | |
2467 __ jcc(Assembler::notEqual, notDouble); | |
2468 #endif | |
2469 // dtos | |
2470 __ pop(dtos); | |
2471 if (!is_static) pop_and_check_object(obj); | |
2472 __ movdbl(field, xmm0); | |
2473 if (!is_static) { | |
2474 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx); | |
2475 } | |
2476 | |
2477 #ifdef ASSERT | |
2478 __ jmp(Done); | |
2479 | |
2480 __ bind(notDouble); | |
2481 __ stop("Bad state"); | |
2482 #endif | |
2483 | |
2484 __ bind(Done); | |
2485 // Check for volatile store | |
2486 __ testl(rdx, rdx); | |
2487 __ jcc(Assembler::zero, notVolatile); | |
2488 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | | |
2489 Assembler::StoreStore)); | |
2490 | |
2491 __ bind(notVolatile); | |
2492 } | |
2493 | |
2494 void TemplateTable::putfield(int byte_no) { | |
2495 putfield_or_static(byte_no, false); | |
2496 } | |
2497 | |
2498 void TemplateTable::putstatic(int byte_no) { | |
2499 putfield_or_static(byte_no, true); | |
2500 } | |
2501 | |
2502 void TemplateTable::jvmti_post_fast_field_mod() { | |
2503 if (JvmtiExport::can_post_field_modification()) { | |
2504 // Check to see if a field modification watch has been set before | |
2505 // we take the time to call into the VM. | |
2506 Label L2; | |
2507 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); | |
2508 __ testl(c_rarg3, c_rarg3); | |
2509 __ jcc(Assembler::zero, L2); | |
2510 __ pop_ptr(rbx); // copy the object pointer from tos | |
2511 __ verify_oop(rbx); | |
2512 __ push_ptr(rbx); // put the object pointer back on tos | |
2513 __ subq(rsp, sizeof(jvalue)); // add space for a jvalue object | |
2514 __ movq(c_rarg3, rsp); | |
2515 const Address field(c_rarg3, 0); | |
2516 | |
2517 switch (bytecode()) { // load values into the jvalue object | |
2518 case Bytecodes::_fast_aputfield: // fall through | |
2519 case Bytecodes::_fast_lputfield: __ movq(field, rax); break; | |
2520 case Bytecodes::_fast_iputfield: __ movl(field, rax); break; | |
2521 case Bytecodes::_fast_bputfield: __ movb(field, rax); break; | |
2522 case Bytecodes::_fast_sputfield: // fall through | |
2523 case Bytecodes::_fast_cputfield: __ movw(field, rax); break; | |
2524 case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break; | |
2525 case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break; | |
2526 default: | |
2527 ShouldNotReachHere(); | |
2528 } | |
2529 | |
2530 // Save rax because call_VM() will clobber it, then use it for | |
2531 // JVMTI purposes | |
2532 __ pushq(rax); | |
2533 // access constant pool cache entry | |
2534 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1); | |
2535 __ verify_oop(rbx); | |
2536 // rbx: object pointer copied above | |
2537 // c_rarg2: cache entry pointer | |
2538 // c_rarg3: jvalue object on the stack | |
2539 __ call_VM(noreg, | |
2540 CAST_FROM_FN_PTR(address, | |
2541 InterpreterRuntime::post_field_modification), | |
2542 rbx, c_rarg2, c_rarg3); | |
2543 __ popq(rax); // restore saved rax | |
2544 __ addq(rsp, sizeof(jvalue)); // release jvalue object space | |
2545 __ bind(L2); | |
2546 } | |
2547 } | |
2548 | |
2549 void TemplateTable::fast_storefield(TosState state) { | |
2550 transition(state, vtos); | |
2551 | |
2552 ByteSize base = constantPoolCacheOopDesc::base_offset(); | |
2553 | |
2554 jvmti_post_fast_field_mod(); | |
2555 | |
2556 // access constant pool cache | |
2557 __ get_cache_and_index_at_bcp(rcx, rbx, 1); | |
2558 | |
2559 // test for volatile with rdx | |
2560 __ movl(rdx, Address(rcx, rbx, Address::times_8, | |
2561 in_bytes(base + | |
2562 ConstantPoolCacheEntry::flags_offset()))); | |
2563 | |
2564 // replace index with field offset from cache entry | |
2565 __ movq(rbx, Address(rcx, rbx, Address::times_8, | |
2566 in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); | |
2567 | |
2568 // [jk] not needed currently | |
2569 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | | |
2570 // Assembler::StoreStore)); | |
2571 | |
2572 Label notVolatile; | |
2573 __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2574 __ andl(rdx, 0x1); | |
2575 | |
2576 // Get object from stack | |
2577 pop_and_check_object(rcx); | |
2578 | |
2579 // field address | |
2580 const Address field(rcx, rbx, Address::times_1); | |
2581 | |
2582 // access field | |
2583 switch (bytecode()) { | |
2584 case Bytecodes::_fast_aputfield: | |
2585 __ movq(field, rax); | |
2586 __ store_check(rcx, field); | |
2587 break; | |
2588 case Bytecodes::_fast_lputfield: | |
2589 __ movq(field, rax); | |
2590 break; | |
2591 case Bytecodes::_fast_iputfield: | |
2592 __ movl(field, rax); | |
2593 break; | |
2594 case Bytecodes::_fast_bputfield: | |
2595 __ movb(field, rax); | |
2596 break; | |
2597 case Bytecodes::_fast_sputfield: | |
2598 // fall through | |
2599 case Bytecodes::_fast_cputfield: | |
2600 __ movw(field, rax); | |
2601 break; | |
2602 case Bytecodes::_fast_fputfield: | |
2603 __ movflt(field, xmm0); | |
2604 break; | |
2605 case Bytecodes::_fast_dputfield: | |
2606 __ movdbl(field, xmm0); | |
2607 break; | |
2608 default: | |
2609 ShouldNotReachHere(); | |
2610 } | |
2611 | |
2612 // Check for volatile store | |
2613 __ testl(rdx, rdx); | |
2614 __ jcc(Assembler::zero, notVolatile); | |
2615 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | | |
2616 Assembler::StoreStore)); | |
2617 __ bind(notVolatile); | |
2618 } | |
2619 | |
2620 | |
2621 void TemplateTable::fast_accessfield(TosState state) { | |
2622 transition(atos, state); | |
2623 | |
2624 // Do the JVMTI work here to avoid disturbing the register state below | |
2625 if (JvmtiExport::can_post_field_access()) { | |
2626 // Check to see if a field access watch has been set before we | |
2627 // take the time to call into the VM. | |
2628 Label L1; | |
2629 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); | |
2630 __ testl(rcx, rcx); | |
2631 __ jcc(Assembler::zero, L1); | |
2632 // access constant pool cache entry | |
2633 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1); | |
2634 __ movq(r12, rax); // save object pointer before call_VM() clobbers it | |
2635 __ verify_oop(rax); | |
2636 __ movq(c_rarg1, rax); | |
2637 // c_rarg1: object pointer copied above | |
2638 // c_rarg2: cache entry pointer | |
2639 __ call_VM(noreg, | |
2640 CAST_FROM_FN_PTR(address, | |
2641 InterpreterRuntime::post_field_access), | |
2642 c_rarg1, c_rarg2); | |
2643 __ movq(rax, r12); // restore object pointer | |
2644 __ bind(L1); | |
2645 } | |
2646 | |
2647 // access constant pool cache | |
2648 __ get_cache_and_index_at_bcp(rcx, rbx, 1); | |
2649 // replace index with field offset from cache entry | |
2650 // [jk] not needed currently | |
2651 // if (os::is_MP()) { | |
2652 // __ movl(rdx, Address(rcx, rbx, Address::times_8, | |
2653 // in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2654 // ConstantPoolCacheEntry::flags_offset()))); | |
2655 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2656 // __ andl(rdx, 0x1); | |
2657 // } | |
2658 __ movq(rbx, Address(rcx, rbx, Address::times_8, | |
2659 in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2660 ConstantPoolCacheEntry::f2_offset()))); | |
2661 | |
2662 // rax: object | |
2663 __ verify_oop(rax); | |
2664 __ null_check(rax); | |
2665 Address field(rax, rbx, Address::times_1); | |
2666 | |
2667 // access field | |
2668 switch (bytecode()) { | |
2669 case Bytecodes::_fast_agetfield: | |
2670 __ movq(rax, field); | |
2671 __ verify_oop(rax); | |
2672 break; | |
2673 case Bytecodes::_fast_lgetfield: | |
2674 __ movq(rax, field); | |
2675 break; | |
2676 case Bytecodes::_fast_igetfield: | |
2677 __ movl(rax, field); | |
2678 break; | |
2679 case Bytecodes::_fast_bgetfield: | |
2680 __ movsbl(rax, field); | |
2681 break; | |
2682 case Bytecodes::_fast_sgetfield: | |
2683 __ load_signed_word(rax, field); | |
2684 break; | |
2685 case Bytecodes::_fast_cgetfield: | |
2686 __ load_unsigned_word(rax, field); | |
2687 break; | |
2688 case Bytecodes::_fast_fgetfield: | |
2689 __ movflt(xmm0, field); | |
2690 break; | |
2691 case Bytecodes::_fast_dgetfield: | |
2692 __ movdbl(xmm0, field); | |
2693 break; | |
2694 default: | |
2695 ShouldNotReachHere(); | |
2696 } | |
2697 // [jk] not needed currently | |
2698 // if (os::is_MP()) { | |
2699 // Label notVolatile; | |
2700 // __ testl(rdx, rdx); | |
2701 // __ jcc(Assembler::zero, notVolatile); | |
2702 // __ membar(Assembler::LoadLoad); | |
2703 // __ bind(notVolatile); | |
2704 //}; | |
2705 } | |
2706 | |
2707 void TemplateTable::fast_xaccess(TosState state) { | |
2708 transition(vtos, state); | |
2709 | |
2710 // get receiver | |
2711 __ movq(rax, aaddress(0)); | |
2712 debug_only(__ verify_local_tag(frame::TagReference, 0)); | |
2713 // access constant pool cache | |
2714 __ get_cache_and_index_at_bcp(rcx, rdx, 2); | |
2715 __ movq(rbx, | |
2716 Address(rcx, rdx, Address::times_8, | |
2717 in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2718 ConstantPoolCacheEntry::f2_offset()))); | |
2719 // make sure exception is reported in correct bcp range (getfield is | |
2720 // next instruction) | |
2721 __ incrementq(r13); | |
2722 __ null_check(rax); | |
2723 switch (state) { | |
2724 case itos: | |
2725 __ movl(rax, Address(rax, rbx, Address::times_1)); | |
2726 break; | |
2727 case atos: | |
2728 __ movq(rax, Address(rax, rbx, Address::times_1)); | |
2729 __ verify_oop(rax); | |
2730 break; | |
2731 case ftos: | |
2732 __ movflt(xmm0, Address(rax, rbx, Address::times_1)); | |
2733 break; | |
2734 default: | |
2735 ShouldNotReachHere(); | |
2736 } | |
2737 | |
2738 // [jk] not needed currently | |
2739 // if (os::is_MP()) { | |
2740 // Label notVolatile; | |
2741 // __ movl(rdx, Address(rcx, rdx, Address::times_8, | |
2742 // in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2743 // ConstantPoolCacheEntry::flags_offset()))); | |
2744 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2745 // __ testl(rdx, 0x1); | |
2746 // __ jcc(Assembler::zero, notVolatile); | |
2747 // __ membar(Assembler::LoadLoad); | |
2748 // __ bind(notVolatile); | |
2749 // } | |
2750 | |
2751 __ decrementq(r13); | |
2752 } | |
2753 | |
2754 | |
2755 | |
2756 //----------------------------------------------------------------------------- | |
2757 // Calls | |
2758 | |
2759 void TemplateTable::count_calls(Register method, Register temp) { | |
2760 // implemented elsewhere | |
2761 ShouldNotReachHere(); | |
2762 } | |
2763 | |
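// Sketch of how the cp cache flags word is used by prepare_invoke below:
// the low byte (masked with 0xFF) gives the receiver's position on the
// expression stack, and the top bits give the return type which, after the
// tosBits shift, indexes the return_3/return_5 address tables to select the
// interpreter return entry that is pushed as the return address.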
2764 void TemplateTable::prepare_invoke(Register method, | |
2765 Register index, | |
2766 int byte_no, | |
2767 Bytecodes::Code code) { | |
2768 // determine flags | |
2769 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; | |
2770 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; | |
2771 const bool is_invokespecial = code == Bytecodes::_invokespecial; | |
2772 const bool load_receiver = code != Bytecodes::_invokestatic; | |
2773 const bool receiver_null_check = is_invokespecial; | |
2774 const bool save_flags = is_invokeinterface || is_invokevirtual; | |
2775 // setup registers & access constant pool cache | |
2776 const Register recv = rcx; | |
2777 const Register flags = rdx; | |
2778 assert_different_registers(method, index, recv, flags); | |
2779 | |
2780 // save 'interpreter return address' | |
2781 __ save_bcp(); | |
2782 | |
2783 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual); | |
2784 | |
2785 // load receiver if needed (note: no return address pushed yet) | |
2786 if (load_receiver) { | |
2787 __ movl(recv, flags); | |
2788 __ andl(recv, 0xFF); | |
2789 if (TaggedStackInterpreter) __ shll(recv, 1); // index*2 | |
2790 __ movq(recv, Address(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1))); | |
2791 __ verify_oop(recv); | |
2792 } | |
2793 | |
2794 // do null check if needed | |
2795 if (receiver_null_check) { | |
2796 __ null_check(recv); | |
2797 } | |
2798 | |
2799 if (save_flags) { | |
2800 __ movl(r13, flags); | |
2801 } | |
2802 | |
2803 // compute return type | |
2804 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2805 // Make sure we don't need to mask flags for tosBits after the above shift | |
2806 ConstantPoolCacheEntry::verify_tosBits(); | |
2807 // load return address | |
2808 { | |
2809 ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table()); | |
2810 ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table()); | |
2811 __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3)); | |
2812 __ movq(flags, Address(rscratch1, flags, Address::times_8)); | |
2813 } | |
2814 | |
2815 // push return address | |
2816 __ pushq(flags); | |
2817 | |
2818 // Restore the flag field from the constant pool cache, and restore r13 | |
2819 // for later null checks; r13 is the bytecode pointer | |
2820 if (save_flags) { | |
2821 __ movl(flags, r13); | |
2822 __ restore_bcp(); | |
2823 } | |
2824 } | |
2825 | |
2826 | |
2827 void TemplateTable::invokevirtual_helper(Register index, | |
2828 Register recv, | |
2829 Register flags) { | |
2830 assert_different_registers(index, recv, rax, rdx); // uses temporary registers rax, rdx | |
2831 | |
2832 // Test for an invoke of a final method | |
2833 Label notFinal; | |
2834 __ movl(rax, flags); | |
2835 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod)); | |
2836 __ jcc(Assembler::zero, notFinal); | |
2837 | |
2838 const Register method = index; // method must be rbx | |
2839 assert(method == rbx, | |
2840 "methodOop must be rbx for interpreter calling convention"); | |
2841 | |
2842 // do the call - the index is actually the method to call | |
2843 __ verify_oop(method); | |
2844 | |
2845 // It's final, need a null check here! | |
2846 __ null_check(recv); | |
2847 | |
2848 // profile this call | |
2849 __ profile_final_call(rax); | |
2850 | |
2851 __ jump_from_interpreted(method, rax); | |
2852 | |
2853 __ bind(notFinal); | |
2854 | |
2855 // get receiver klass | |
2856 __ null_check(recv, oopDesc::klass_offset_in_bytes()); | |
2857 __ movq(rax, Address(recv, oopDesc::klass_offset_in_bytes())); | |
2858 | |
2859 __ verify_oop(rax); | |
2860 | |
2861 // profile this call | |
2862 __ profile_virtual_call(rax, r14, rdx); | |
2863 | |
2864 // get target methodOop & entry point | |
2865 const int base = instanceKlass::vtable_start_offset() * wordSize; | |
2866 assert(vtableEntry::size() * wordSize == 8, | |
2867 "adjust the scaling in the code below"); | |
2868 __ movq(method, Address(rax, index, | |
2869 Address::times_8, | |
2870 base + vtableEntry::method_offset_in_bytes())); | |
2871 __ movq(rdx, Address(method, methodOopDesc::interpreter_entry_offset())); | |
2872 __ jump_from_interpreted(method, rdx); | |
2873 } | |
2874 | |
2875 | |
2876 void TemplateTable::invokevirtual(int byte_no) { | |
2877 transition(vtos, vtos); | |
2878 prepare_invoke(rbx, noreg, byte_no, bytecode()); | |
2879 | |
2880 // rbx: index | |
2881 // rcx: receiver | |
2882 // rdx: flags | |
2883 | |
2884 invokevirtual_helper(rbx, rcx, rdx); | |
2885 } | |
2886 | |
2887 | |
2888 void TemplateTable::invokespecial(int byte_no) { | |
2889 transition(vtos, vtos); | |
2890 prepare_invoke(rbx, noreg, byte_no, bytecode()); | |
2891 // do the call | |
2892 __ verify_oop(rbx); | |
2893 __ profile_call(rax); | |
2894 __ jump_from_interpreted(rbx, rax); | |
2895 } | |
2896 | |
2897 | |
2898 void TemplateTable::invokestatic(int byte_no) { | |
2899 transition(vtos, vtos); | |
2900 prepare_invoke(rbx, noreg, byte_no, bytecode()); | |
2901 // do the call | |
2902 __ verify_oop(rbx); | |
2903 __ profile_call(rax); | |
2904 __ jump_from_interpreted(rbx, rax); | |
2905 } | |
2906 | |
2907 void TemplateTable::fast_invokevfinal(int byte_no) { | |
2908 transition(vtos, vtos); | |
2909 __ stop("fast_invokevfinal not used on amd64"); | |
2910 } | |
2911 | |
2912 void TemplateTable::invokeinterface(int byte_no) { | |
2913 transition(vtos, vtos); | |
2914 prepare_invoke(rax, rbx, byte_no, bytecode()); | |
2915 | |
2916 // rax: Interface | |
2917 // rbx: index | |
2918 // rcx: receiver | |
2919 // rdx: flags | |
2920 | |
2921 // Special case of invokeinterface called for virtual method of | |
2922 // java.lang.Object. See cpCacheOop.cpp for details. | |
2923 // This code isn't produced by javac, but could be produced by | |
2924 // another compliant java compiler. | |
2925 Label notMethod; | |
2926 __ movl(r14, rdx); | |
2927 __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface)); | |
2928 __ jcc(Assembler::zero, notMethod); | |
2929 | |
2930 invokevirtual_helper(rbx, rcx, rdx); | |
2931 __ bind(notMethod); | |
2932 | |
2933 // Get receiver klass into rdx - also a null check | |
2934 __ restore_locals(); // restore r14 | |
2935 __ movq(rdx, Address(rcx, oopDesc::klass_offset_in_bytes())); | |
2936 __ verify_oop(rdx); | |
2937 | |
2938 // profile this call | |
2939 __ profile_virtual_call(rdx, r13, r14); | |
2940 | |
2941 __ movq(r14, rdx); // Save klassOop in r14 | |
2942 | |
2943 // Compute start of first itableOffsetEntry (which is at the end of | |
2944 // the vtable) | |
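// Layout assumed by the search below: after the vtable comes an array of
// itableOffsetEntry { interface klassOop, offset }, terminated by a null
// interface; the offset field is the displacement from the receiver klassOop
// to that interface's block of itableMethodEntry slots (one methodOop-sized
// word per method, hence the times_8 scaling further down).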
2945 const int base = instanceKlass::vtable_start_offset() * wordSize; | |
2946 // Get length of vtable | |
2947 assert(vtableEntry::size() * wordSize == 8, | |
2948 "adjust the scaling in the code below"); | |
2949 __ movl(r13, Address(rdx, | |
2950 instanceKlass::vtable_length_offset() * wordSize)); | |
2951 __ leaq(rdx, Address(rdx, r13, Address::times_8, base)); | |
2952 | |
2953 if (HeapWordsPerLong > 1) { | |
2954 // Round up to align_object_offset boundary | |
2955 __ round_to_q(rdx, BytesPerLong); | |
2956 } | |
2957 | |
2958 Label entry, search, interface_ok; | |
2959 | |
2960 __ jmpb(entry); | |
2961 __ bind(search); | |
2962 __ addq(rdx, itableOffsetEntry::size() * wordSize); | |
2963 | |
2964 __ bind(entry); | |
2965 | |
2966 // Check that the entry is non-null. A null entry means that the | |
2967 // receiver class doesn't implement the interface, and wasn't the | |
2968 // same as the receiver class checked when the interface was | |
2969 // resolved. | |
2970 __ pushq(rdx); | |
2971 __ movq(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); | |
2972 __ testq(rdx, rdx); | |
2973 __ jcc(Assembler::notZero, interface_ok); | |
2974 // throw exception | |
2975 __ popq(rdx); // pop saved register first. | |
2976 __ popq(rbx); // pop return address (pushed by prepare_invoke) | |
2977 __ restore_bcp(); // r13 must be correct for exception handler (was | |
2978 // destroyed) | |
2979 __ restore_locals(); // make sure locals pointer is correct as well | |
2980 // (was destroyed) | |
2981 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
2982 InterpreterRuntime::throw_IncompatibleClassChangeError)); | |
2983 // the call_VM checks for exception, so we should never return here. | |
2984 __ should_not_reach_here(); | |
2985 __ bind(interface_ok); | |
2986 | |
2987 __ popq(rdx); | |
2988 | |
2989 __ cmpq(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); | |
2990 __ jcc(Assembler::notEqual, search); | |
2991 | |
2992 __ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes())); | |
2993 | |
2994 __ addq(rdx, r14); // Add offset to klassOop | |
2995 assert(itableMethodEntry::size() * wordSize == 8, | |
2996 "adjust the scaling in the code below"); | |
2997 __ movq(rbx, Address(rdx, rbx, Address::times_8)); | |
2998 // rbx: methodOop to call | |
2999 // rcx: receiver | |
3000 // Check for abstract method error | |
3001 // Note: This should be done more efficiently via a | |
3002 // throw_abstract_method_error interpreter entry point and a | |
3003 // conditional jump to it in case of a null method. | |
3004 { | |
3005 Label L; | |
3006 __ testq(rbx, rbx); | |
3007 __ jcc(Assembler::notZero, L); | |
3008 // throw exception | |
3009 // note: must restore interpreter registers to canonical | |
3010 // state for exception handling to work correctly! | |
3011 __ popq(rbx); // pop return address (pushed by prepare_invoke) | |
3012 __ restore_bcp(); // r13 must be correct for exception handler | |
3013 // (was destroyed) | |
3014 __ restore_locals(); // make sure locals pointer is correct as | |
3015 // well (was destroyed) | |
3016 __ call_VM(noreg, | |
3017 CAST_FROM_FN_PTR(address, | |
3018 InterpreterRuntime::throw_AbstractMethodError)); | |
3019 // the call_VM checks for exception, so we should never return here. | |
3020 __ should_not_reach_here(); | |
3021 __ bind(L); | |
3022 } | |
3023 | |
3024 __ movq(rcx, Address(rbx, methodOopDesc::interpreter_entry_offset())); | |
3025 | |
3026 // do the call | |
3027 // rcx: receiver | |
3028 // rbx: methodOop | |
3029 __ jump_from_interpreted(rbx, rdx); | |
3030 } | |
3031 | |
3032 //----------------------------------------------------------------------------- | |
3033 // Allocation | |
3034 | |
3035 void TemplateTable::_new() { | |
3036 transition(vtos, atos); | |
3037 __ get_unsigned_2_byte_index_at_bcp(rdx, 1); | |
3038 Label slow_case; | |
3039 Label done; | |
3040 Label initialize_header; | |
3041 Label initialize_object; // including clearing the fields | |
3042 Label allocate_shared; | |
3043 ExternalAddress top((address)Universe::heap()->top_addr()); | |
3044 ExternalAddress end((address)Universe::heap()->end_addr()); | |
3045 | |
3046 __ get_cpool_and_tags(rsi, rax); | |
3047 // get instanceKlass | |
3048 __ movq(rsi, Address(rsi, rdx, | |
3049 Address::times_8, sizeof(constantPoolOopDesc))); | |
3050 | |
3051 // make sure the class we're about to instantiate has been | |
3052 // resolved. Note: slow_case does a pop of stack, which is why we | |
3053 // loaded class/pushed above | |
3054 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; | |
3055 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), | |
3056 JVM_CONSTANT_Class); | |
3057 __ jcc(Assembler::notEqual, slow_case); | |
3058 | |
3059 // make sure klass is initialized & doesn't have finalizer | |
3060 // make sure klass is fully initialized | |
3061 __ cmpl(Address(rsi, | |
3062 instanceKlass::init_state_offset_in_bytes() + | |
3063 sizeof(oopDesc)), | |
3064 instanceKlass::fully_initialized); | |
3065 __ jcc(Assembler::notEqual, slow_case); | |
3066 | |
3067 // get instance_size in instanceKlass (scaled to a count of bytes) | |
3068 __ movl(rdx, | |
3069 Address(rsi, | |
3070 Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); | |
3071 // test to see if it has a finalizer or is malformed in some way | |
3072 __ testl(rdx, Klass::_lh_instance_slow_path_bit); | |
3073 __ jcc(Assembler::notZero, slow_case); | |
3074 | |
3075 // Allocate the instance | |
3076 // 1) Try to allocate in the TLAB | |
3077 // 2) if fail and the object is large allocate in the shared Eden | |
3078 // 3) if the above fails (or is not applicable), go to a slow case | |
3079 // (creates a new TLAB, etc.) | |
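// A rough sketch of the fast paths generated below, in pseudocode (rdx is
// the instance size in bytes):
//
//   // 1) TLAB bump-pointer allocation
//   obj = thread->tlab_top;
//   if (obj + rdx > thread->tlab_end) goto shared_or_slow;
//   thread->tlab_top = obj + rdx;
//
//   // 2) shared eden allocation with a CAS retry loop
//   do {
//     obj = *top_addr;
//     if (obj + rdx > *end_addr) goto slow_case;
//   } while (!CAS(top_addr, obj, obj + rdx));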
3080 | |
3081 const bool allow_shared_alloc = | |
3082 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; | |
3083 | |
3084 if (UseTLAB) { | |
3085 __ movq(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); | |
3086 __ leaq(rbx, Address(rax, rdx, Address::times_1)); | |
3087 __ cmpq(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); | |
3088 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case); | |
3089 __ movq(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx); | |
3090 if (ZeroTLAB) { | |
3091 // the fields have been already cleared | |
3092 __ jmp(initialize_header); | |
3093 } else { | |
3094 // initialize both the header and fields | |
3095 __ jmp(initialize_object); | |
3096 } | |
3097 } | |
3098 | |
3099 // Allocation in the shared Eden, if allowed. | |
3100 // | |
3101 // rdx: instance size in bytes | |
3102 if (allow_shared_alloc) { | |
3103 __ bind(allocate_shared); | |
3104 | |
3105 const Register RtopAddr = rscratch1; | |
3106 const Register RendAddr = rscratch2; | |
3107 | |
3108 __ lea(RtopAddr, top); | |
3109 __ lea(RendAddr, end); | |
3110 __ movq(rax, Address(RtopAddr, 0)); | |
3111 | |
3112 // For retries rax gets set by cmpxchgq | |
3113 Label retry; | |
3114 __ bind(retry); | |
3115 __ leaq(rbx, Address(rax, rdx, Address::times_1)); | |
3116 __ cmpq(rbx, Address(RendAddr, 0)); | |
3117 __ jcc(Assembler::above, slow_case); | |
3118 | |
3119 // Compare rax with the top addr, and if still equal, store the new | |
3120 // top addr in rbx at the address of the top addr pointer. Sets ZF if was | |
3121 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs. | |
3122 // | |
3123 // rax: object begin | |
3124 // rbx: object end | |
3125 // rdx: instance size in bytes | |
3126 if (os::is_MP()) { | |
3127 __ lock(); | |
3128 } | |
3129 __ cmpxchgq(rbx, Address(RtopAddr, 0)); | |
3130 | |
3131 // if someone beat us on the allocation, try again, otherwise continue | |
3132 __ jcc(Assembler::notEqual, retry); | |
3133 } | |
3134 | |
3135 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { | |
3136 // The object is initialized before the header. If the object size is | |
3137 // zero, go directly to the header initialization. | |
3138 __ bind(initialize_object); | |
3139 __ decrementl(rdx, sizeof(oopDesc)); | |
3140 __ jcc(Assembler::zero, initialize_header); | |
3141 | |
3142 // Initialize object fields | |
3143 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) | |
3144 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop | |
3145 { | |
3146 Label loop; | |
3147 __ bind(loop); | |
3148 __ movq(Address(rax, rdx, Address::times_8, | |
3149 sizeof(oopDesc) - oopSize), | |
3150 rcx); | |
3151 __ decrementl(rdx); | |
3152 __ jcc(Assembler::notZero, loop); | |
3153 } | |
3154 | |
3155 // initialize object header only. | |
3156 __ bind(initialize_header); | |
3157 if (UseBiasedLocking) { | |
3158 __ movq(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); | |
3159 __ movq(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); | |
3160 } else { | |
3161 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), | |
3162 (intptr_t) markOopDesc::prototype()); // header (address 0x1) | |
3163 } | |
3164 __ movq(Address(rax, oopDesc::klass_offset_in_bytes()), rsi); // klass | |
3165 __ jmp(done); | |
3166 } | |
3167 | |
3168 { | |
3169 SkipIfEqual skip(_masm, &DTraceAllocProbes, false); | |
3170 // Trigger dtrace event for fastpath | |
3171 __ push(atos); // save the return value | |
3172 __ call_VM_leaf( | |
3173 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax); | |
3174 __ pop(atos); // restore the return value | |
3175 } | |
3176 | |
3177 // slow case | |
3178 __ bind(slow_case); | |
3179 __ get_constant_pool(c_rarg1); | |
3180 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); | |
3181 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2); | |
3182 __ verify_oop(rax); | |
3183 | |
3184 // continue | |
3185 __ bind(done); | |
3186 } | |
3187 | |
3188 void TemplateTable::newarray() { | |
3189 transition(itos, atos); | |
3190 __ load_unsigned_byte(c_rarg1, at_bcp(1)); | |
3191 __ movl(c_rarg2, rax); | |
3192 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), | |
3193 c_rarg1, c_rarg2); | |
3194 } | |
3195 | |
3196 void TemplateTable::anewarray() { | |
3197 transition(itos, atos); | |
3198 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); | |
3199 __ get_constant_pool(c_rarg1); | |
3200 __ movl(c_rarg3, rax); | |
3201 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), | |
3202 c_rarg1, c_rarg2, c_rarg3); | |
3203 } | |
3204 | |
3205 void TemplateTable::arraylength() { | |
3206 transition(atos, itos); | |
3207 __ null_check(rax, arrayOopDesc::length_offset_in_bytes()); | |
3208 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes())); | |
3209 } | |
3210 | |
3211 void TemplateTable::checkcast() { | |
3212 transition(atos, atos); | |
3213 Label done, is_null, ok_is_subtype, quicked, resolved; | |
3214 __ testq(rax, rax); // object is in rax | |
3215 __ jcc(Assembler::zero, is_null); | |
3216 | |
3217 // Get cpool & tags index | |
3218 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array | |
3219 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index | |
3220 // See if bytecode has already been quickened | |
3221 __ cmpb(Address(rdx, rbx, | |
3222 Address::times_1, | |
3223 typeArrayOopDesc::header_size(T_BYTE) * wordSize), | |
3224 JVM_CONSTANT_Class); | |
3225 __ jcc(Assembler::equal, quicked); | |
3226 | |
3227 __ movq(r12, rcx); // save rcx XXX | |
3228 __ push(atos); // save receiver for result, and for GC | |
3229 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); | |
3230 __ pop_ptr(rdx); // restore receiver | |
3231 __ movq(rcx, r12); // restore rcx XXX | |
3232 __ jmpb(resolved); | |
3233 | |
3234 // Get superklass in rax and subklass in rbx | |
3235 __ bind(quicked); | |
3236 __ movq(rdx, rax); // Save object in rdx; rax needed for subtype check | |
3237 __ movq(rax, Address(rcx, rbx, | |
3238 Address::times_8, sizeof(constantPoolOopDesc))); | |
3239 | |
3240 __ bind(resolved); | |
3241 __ movq(rbx, Address(rdx, oopDesc::klass_offset_in_bytes())); | |
3242 | |
3243 // Generate subtype check. Blows rcx, rdi. Object in rdx. | |
3244 // Superklass in rax. Subklass in rbx. | |
3245 __ gen_subtype_check(rbx, ok_is_subtype); | |
3246 | |
3247 // Come here on failure | |
3248 __ push_ptr(rdx); | |
3249 // object is at TOS | |
3250 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); | |
3251 | |
3252 // Come here on success | |
3253 __ bind(ok_is_subtype); | |
3254 __ movq(rax, rdx); // Restore object from rdx | |
3255 | |
3256 // Collect counts on whether this check-cast sees NULLs a lot or not. | |
3257 if (ProfileInterpreter) { | |
3258 __ jmp(done); | |
3259 __ bind(is_null); | |
3260 __ profile_null_seen(rcx); | |
3261 } else { | |
3262 __ bind(is_null); // same as 'done' | |
3263 } | |
3264 __ bind(done); | |
3265 } | |
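// Bytecode-level sketch of the checkcast sequence above (resolve_class() and
// is_subtype_of() are illustrative stand-ins for the constant-pool lookup and
// gen_subtype_check()):
//
//   if (obj == NULL) return obj;                   // null always passes
//   super = resolve_class(cpool, index);           // quickened, or via quicken_io_cc
//   if (!is_subtype_of(obj->klass(), super))       // superklass in rax, subklass in rbx
//     throw ClassCastException;                    // _throw_ClassCastException_entry
//   return obj;                                    // object stays on the stack (atos)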
3266 | |
3267 void TemplateTable::instanceof() { | |
3268 transition(atos, itos); | |
3269 Label done, is_null, ok_is_subtype, quicked, resolved; | |
3270 __ testq(rax, rax); | |
3271 __ jcc(Assembler::zero, is_null); | |
3272 | |
3273 // Get cpool & tags index | |
3274 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array | |
3275 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index | |
3276 // See if bytecode has already been quickened | |
3277 __ cmpb(Address(rdx, rbx, | |
3278 Address::times_1, | |
3279 typeArrayOopDesc::header_size(T_BYTE) * wordSize), | |
3280 JVM_CONSTANT_Class); | |
3281 __ jcc(Assembler::equal, quicked); | |
3282 | |
3283 __ movq(r12, rcx); // save rcx | |
3284 __ push(atos); // save receiver for result, and for GC | |
3285 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); | |
3286 __ pop_ptr(rdx); // restore receiver | |
3287 __ movq(rdx, Address(rdx, oopDesc::klass_offset_in_bytes())); | |
3288 __ movq(rcx, r12); // restore rcx | |
3289 __ jmpb(resolved); | |
3290 | |
3291 // Get superklass in rax and subklass in rdx | |
3292 __ bind(quicked); | |
3293 __ movq(rdx, Address(rax, oopDesc::klass_offset_in_bytes())); | |
3294 __ movq(rax, Address(rcx, rbx, | |
3295 Address::times_8, sizeof(constantPoolOopDesc))); | |
3296 | |
3297 __ bind(resolved); | |
3298 | |
3299 // Generate subtype check. Blows rcx, rdi | |
3300 // Superklass in rax. Subklass in rdx. | |
3301 __ gen_subtype_check(rdx, ok_is_subtype); | |
3302 | |
3303 // Come here on failure | |
3304 __ xorl(rax, rax); | |
3305 __ jmpb(done); | |
3306 // Come here on success | |
3307 __ bind(ok_is_subtype); | |
3308 __ movl(rax, 1); | |
3309 | |
3310 // Collect counts on whether this test sees NULLs a lot or not. | |
3311 if (ProfileInterpreter) { | |
3312 __ jmp(done); | |
3313 __ bind(is_null); | |
3314 __ profile_null_seen(rcx); | |
3315 } else { | |
3316 __ bind(is_null); // same as 'done' | |
3317 } | |
3318 __ bind(done); | |
3319 // rax = 0: obj == NULL or obj is not an instance of the specified klass | |
3320 // rax = 1: obj != NULL and obj is an instance of the specified klass | |
3321 } | |
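// Bytecode-level sketch of instanceof (same stand-in names as for checkcast
// above); failure is reported as a value instead of an exception:
//
//   if (obj == NULL) return 0;                     // null is never an instance
//   super = resolve_class(cpool, index);
//   return is_subtype_of(obj->klass(), super) ? 1 : 0;   // result ends up in rax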
3322 | |
3323 //----------------------------------------------------------------------------- | |
3324 // Breakpoints | |
3325 void TemplateTable::_breakpoint() { | |
3326 // Note: We get here even if we are single stepping. | |
3327 // jbug insists on setting breakpoints at every bytecode | |
3328 // even if we are in single step mode. | |
3329 | |
3330 transition(vtos, vtos); | |
3331 | |
3332 // get the unpatched byte code | |
3333 __ get_method(c_rarg1); | |
3334 __ call_VM(noreg, | |
3335 CAST_FROM_FN_PTR(address, | |
3336 InterpreterRuntime::get_original_bytecode_at), | |
3337 c_rarg1, r13); | |
3338 __ movq(rbx, rax); | |
3339 | |
3340 // post the breakpoint event | |
3341 __ get_method(c_rarg1); | |
3342 __ call_VM(noreg, | |
3343 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), | |
3344 c_rarg1, r13); | |
3345 | |
3346 // complete the execution of original bytecode | |
3347 __ dispatch_only_normal(vtos); | |
3348 } | |
3349 | |
3350 //----------------------------------------------------------------------------- | |
3351 // Exceptions | |
3352 | |
3353 void TemplateTable::athrow() { | |
3354 transition(atos, vtos); | |
3355 __ null_check(rax); | |
3356 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); | |
3357 } | |
3358 | |
3359 //----------------------------------------------------------------------------- | |
3360 // Synchronization | |
3361 // | |
3362 // Note: monitorenter & exit are symmetric routines; this is reflected | |
3363 // in the assembly code structure as well | |
3364 // | |
3365 // Stack layout: | |
3366 // | |
3367 // [expressions ] <--- rsp = expression stack top | |
3368 // .. | |
3369 // [expressions ] | |
3370 // [monitor entry] <--- monitor block top = expression stack bot | |
3371 // .. | |
3372 // [monitor entry] | |
3373 // [frame data ] <--- monitor block bot | |
3374 // ... | |
3375 // [saved rbp ] <--- rbp | |
3376 void TemplateTable::monitorenter() { | |
3377 transition(atos, vtos); | |
3378 | |
3379 // check for NULL object | |
3380 __ null_check(rax); | |
3381 | |
3382 const Address monitor_block_top( | |
3383 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); | |
3384 const Address monitor_block_bot( | |
3385 rbp, frame::interpreter_frame_initial_sp_offset * wordSize); | |
3386 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
3387 | |
3388 Label allocated; | |
3389 | |
3390 // initialize entry pointer | |
3391 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL | |
3392 | |
3393 // find a free slot in the monitor block (result in c_rarg1) | |
3394 { | |
3395 Label entry, loop, exit; | |
3396 __ movq(c_rarg3, monitor_block_top); // points to current entry, | |
3397 // starting with top-most entry | |
3398 __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom | |
3399 // of monitor block | |
3400 __ jmpb(entry); | |
3401 | |
3402 __ bind(loop); | |
3403 // check if current entry is used | |
3404 __ cmpq(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int) NULL); | |
3405 // if not used then remember entry in c_rarg1 | |
3406 __ cmovq(Assembler::equal, c_rarg1, c_rarg3); | |
3407 // check if current entry is for same object | |
3408 __ cmpq(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); | |
3409 // if same object then stop searching | |
3410 __ jccb(Assembler::equal, exit); | |
3411 // otherwise advance to next entry | |
3412 __ addq(c_rarg3, entry_size); | |
3413 __ bind(entry); | |
3414 // check if bottom reached | |
3415 __ cmpq(c_rarg3, c_rarg2); | |
3416 // if not at bottom then check this entry | |
3417 __ jcc(Assembler::notEqual, loop); | |
3418 __ bind(exit); | |
3419 } | |
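// Sketch of the search above (illustrative): record the most recently seen
// free slot, and stop early if an entry for this object already exists.
//
//   free_slot = NULL;                              // c_rarg1
//   for (cur = block_top; cur != block_bot; cur += entry_size) {  // c_rarg3 vs c_rarg2
//     if (cur->obj == NULL)     free_slot = cur;   // cmovq(equal, c_rarg1, c_rarg3)
//     if (cur->obj == lock_obj) break;             // jccb(equal, exit)
//   }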
3420 | |
3421 __ testq(c_rarg1, c_rarg1); // check if a slot has been found | |
3422 __ jcc(Assembler::notZero, allocated); // if found, continue with that one | |
3423 | |
3424 // allocate one if there's no free slot | |
3425 { | |
3426 Label entry, loop; | |
3427 // 1. compute new pointers // rsp: old expression stack top | |
3428 __ movq(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom | |
3429 __ subq(rsp, entry_size); // move expression stack top | |
3430 __ subq(c_rarg1, entry_size); // move expression stack bottom | |
3431 __ movq(c_rarg3, rsp); // set start value for copy loop | |
3432 __ movq(monitor_block_bot, c_rarg1); // set new monitor block bottom | |
3433 __ jmp(entry); | |
3434 // 2. move expression stack contents | |
3435 __ bind(loop); | |
3436 __ movq(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack | |
3437 // word from old location | |
3438 __ movq(Address(c_rarg3, 0), c_rarg2); // and store it at new location | |
3439 __ addq(c_rarg3, wordSize); // advance to next word | |
3440 __ bind(entry); | |
3441 __ cmpq(c_rarg3, c_rarg1); // check if bottom reached | |
3442 __ jcc(Assembler::notEqual, loop); // if not at bottom then | |
3443 // copy next word | |
3444 } | |
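// Sketch of the block above (illustrative): the monitor area grows by sliding
// the whole expression stack down one entry, which opens a fresh slot at the
// new monitor block bottom.
//
//   rsp       -= entry_size;                       // new expression stack top
//   block_bot -= entry_size;                       // new monitor block bottom
//   for (p = rsp; p != block_bot; p += wordSize)   // c_rarg3 walks upward
//     *p = *(p + entry_size);                      // copy each word to its new home
//   // c_rarg1 == block_bot is the newly opened entry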
3445 | |
3446 // call run-time routine | |
3447 // c_rarg1: points to monitor entry | |
3448 __ bind(allocated); | |
3449 | |
3450 // Increment bcp to point to the next bytecode, so exception | |
3451 // handling for async. exceptions works correctly. | |
3452 // The object has already been popped from the stack, so the | |
3453 // expression stack looks correct. | |
3454 __ incrementq(r13); | |
3455 | |
3456 // store object | |
3457 __ movq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax); | |
3458 __ lock_object(c_rarg1); | |
3459 | |
3460 // check to make sure this monitor doesn't cause stack overflow after locking | |
3461 __ save_bcp(); // in case of exception | |
3462 __ generate_stack_overflow_check(0); | |
3463 | |
3464 // The bcp has already been incremented. Just need to dispatch to | |
3465 // next instruction. | |
3466 __ dispatch_next(vtos); | |
3467 } | |
3468 | |
3469 | |
3470 void TemplateTable::monitorexit() { | |
3471 transition(atos, vtos); | |
3472 | |
3473 // check for NULL object | |
3474 __ null_check(rax); | |
3475 | |
3476 const Address monitor_block_top( | |
3477 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); | |
3478 const Address monitor_block_bot( | |
3479 rbp, frame::interpreter_frame_initial_sp_offset * wordSize); | |
3480 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
3481 | |
3482 Label found; | |
3483 | |
3484 // find matching slot | |
3485 { | |
3486 Label entry, loop; | |
3487 __ movq(c_rarg1, monitor_block_top); // points to current entry, | |
3488 // starting with top-most entry | |
3489 __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom | |
3490 // of monitor block | |
3491 __ jmpb(entry); | |
3492 | |
3493 __ bind(loop); | |
3494 // check if current entry is for same object | |
3495 __ cmpq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); | |
3496 // if same object then stop searching | |
3497 __ jcc(Assembler::equal, found); | |
3498 // otherwise advance to next entry | |
3499 __ addq(c_rarg1, entry_size); | |
3500 __ bind(entry); | |
3501 // check if bottom reached | |
3502 __ cmpq(c_rarg1, c_rarg2); | |
3503 // if not at bottom then check this entry | |
3504 __ jcc(Assembler::notEqual, loop); | |
3505 } | |
3506 | |
3507 // Error handling: unlocking was not block-structured | |
3508 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
3509 InterpreterRuntime::throw_illegal_monitor_state_exception)); | |
3510 __ should_not_reach_here(); | |
3511 | |
3512 // call run-time routine | |
3513 // c_rarg1: points to monitor entry | |
3514 __ bind(found); | |
3515 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) | |
3516 __ unlock_object(c_rarg1); | |
3517 __ pop_ptr(rax); // discard object | |
3518 } | |
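// Sketch of monitorexit (illustrative): the same walk over the monitor block,
// but only a matching entry will do; running off the bottom means the unlock
// is not block-structured.
//
//   for (cur = block_top; cur != block_bot; cur += entry_size)
//     if (cur->obj == lock_obj) { unlock_object(cur); return; }
//   throw IllegalMonitorStateException;            // throw_illegal_monitor_state_exception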
3519 | |
3520 | |
3521 // Wide instructions | |
3522 void TemplateTable::wide() { | |
3523 transition(vtos, vtos); | |
3524 __ load_unsigned_byte(rbx, at_bcp(1)); | |
3525 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point)); | |
3526 __ jmp(Address(rscratch1, rbx, Address::times_8)); | |
3527 // Note: the r13 increment step is part of the individual wide | |
3528 // bytecode implementations | |
3529 } | |
3530 | |
3531 | |
3532 // Multi arrays | |
3533 void TemplateTable::multianewarray() { | |
3534 transition(vtos, atos); | |
3535 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions | |
3536 // last dim is on top of stack; we want address of first one: | |
3537 // first_addr = last_addr + (ndims - 1) * wordSize | |
3538 if (TaggedStackInterpreter) __ shll(rax, 1); // index*2 | |
3539 __ leaq(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize)); | |
3540 call_VM(rax, | |
3541 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), | |
3542 c_rarg1); | |
3543 __ load_unsigned_byte(rbx, at_bcp(3)); | |
3544 if (TaggedStackInterpreter) __ shll(rbx, 1); // index*2 | |
3545 __ leaq(rsp, Address(rsp, rbx, Address::times_8)); | |
3546 } |
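// Sketch of the address arithmetic above (illustrative): the dimension counts
// sit on the expression stack with the last dimension on top (lowest address),
// so the runtime is handed the address of the first dimension.
//
//   slot = TaggedStackInterpreter ? 2 * wordSize : wordSize;   // shll(rax, 1)
//   first_dim_addr = rsp + ndims * slot - wordSize;            // leaq(c_rarg1, ...)
//   InterpreterRuntime::multianewarray(first_dim_addr);
//   rsp += ndims * slot;                           // pop the dimension counts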