graal-jvmci-8: annotate src/cpu/x86/vm/templateTable_x86_64.cpp @ 304:dc7f315e41f7

5108146: Merge i486 and amd64 cpu directories
6459804: Want client (c1) compiler for x86_64 (amd64) for faster start-up
Reviewed-by: kvn

author   | never
date     | Wed, 27 Aug 2008 00:21:55 -0700
parents  | d1605aabd0a1
children | f8199438385b

rev | line source
0 | 1 /* |
196 | 2 * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
20 * CA 95054 USA or visit www.sun.com if you need additional information or | |
21 * have any questions. | |
22 * | |
23 */ | |
24 | |
25 #include "incls/_precompiled.incl" | |
26 #include "incls/_templateTable_x86_64.cpp.incl" | |
27 | |
304 | 28 #ifndef CC_INTERP |
29 | |
0 | 30 #define __ _masm-> |
31 | |
32 // Platform-dependent initialization | |
33 | |
34 void TemplateTable::pd_initialize() { | |
35 // No amd64 specific initialization | |
36 } | |
37 | |
38 // Address computation: local variables | |
39 | |
40 static inline Address iaddress(int n) { | |
41 return Address(r14, Interpreter::local_offset_in_bytes(n)); | |
42 } | |
43 | |
44 static inline Address laddress(int n) { | |
45 return iaddress(n + 1); | |
46 } | |
47 | |
48 static inline Address faddress(int n) { | |
49 return iaddress(n); | |
50 } | |
51 | |
52 static inline Address daddress(int n) { | |
53 return laddress(n); | |
54 } | |
55 | |
56 static inline Address aaddress(int n) { | |
57 return iaddress(n); | |
58 } | |
59 | |
60 static inline Address iaddress(Register r) { | |
61 return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes()); | |
62 } | |
63 | |
64 static inline Address laddress(Register r) { | |
65 return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1)); | |
66 } | |
67 | |
68 static inline Address faddress(Register r) { | |
69 return iaddress(r); | |
70 } | |
71 | |
72 static inline Address daddress(Register r) { | |
73 return laddress(r); | |
74 } | |
75 | |
76 static inline Address aaddress(Register r) { | |
77 return iaddress(r); | |
78 } | |
79 | |
80 static inline Address at_rsp() { | |
81 return Address(rsp, 0); | |
82 } | |
83 | |
84 // At top of Java expression stack which may be different than esp(). It | |
85 // isn't for category 1 objects. | |
86 static inline Address at_tos () { | |
87 return Address(rsp, Interpreter::expr_offset_in_bytes(0)); | |
88 } | |
89 | |
90 static inline Address at_tos_p1() { | |
91 return Address(rsp, Interpreter::expr_offset_in_bytes(1)); | |
92 } | |
93 | |
94 static inline Address at_tos_p2() { | |
95 return Address(rsp, Interpreter::expr_offset_in_bytes(2)); | |
96 } | |
97 | |
98 static inline Address at_tos_p3() { | |
99 return Address(rsp, Interpreter::expr_offset_in_bytes(3)); | |
100 } | |
101 | |
102 // Condition conversion | |
103 static Assembler::Condition j_not(TemplateTable::Condition cc) { | |
104 switch (cc) { | |
105 case TemplateTable::equal : return Assembler::notEqual; | |
106 case TemplateTable::not_equal : return Assembler::equal; | |
107 case TemplateTable::less : return Assembler::greaterEqual; | |
108 case TemplateTable::less_equal : return Assembler::greater; | |
109 case TemplateTable::greater : return Assembler::lessEqual; | |
110 case TemplateTable::greater_equal: return Assembler::less; | |
111 } | |
112 ShouldNotReachHere(); | |
113 return Assembler::zero; | |
114 } | |
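// Note: the condition is negated because the comparison templates branch
// around the taken path; e.g. if_icmp() below emits
//   __ jcc(j_not(cc), not_taken);
// so the fall-through code is executed when the Java condition holds.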
115 | |
116 | |
117 // Miscellaneous helper routines | |
118 | |
119 Address TemplateTable::at_bcp(int offset) { | |
120 assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); | |
121 return Address(r13, offset); | |
122 } | |
123 | |
124 void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc, | |
125 Register scratch, | |
126 bool load_bc_into_scratch/*=true*/) { | |
127 if (!RewriteBytecodes) { | |
128 return; | |
129 } | |
130 // the pair bytecodes have already done the load. | |
131 if (load_bc_into_scratch) { | |
132 __ movl(bc, bytecode); | |
133 } | |
134 Label patch_done; | |
135 if (JvmtiExport::can_post_breakpoint()) { | |
136 Label fast_patch; | |
137 // if a breakpoint is present we can't rewrite the stream directly | |
138 __ movzbl(scratch, at_bcp(0)); | |
139 __ cmpl(scratch, Bytecodes::_breakpoint); | |
140 __ jcc(Assembler::notEqual, fast_patch); | |
141 __ get_method(scratch); | |
142 // Let breakpoint table handling rewrite to quicker bytecode | |
143 __ call_VM(noreg, | |
144 CAST_FROM_FN_PTR(address, | |
145 InterpreterRuntime::set_original_bytecode_at), | |
146 scratch, r13, bc); | |
147 #ifndef ASSERT | |
148 __ jmpb(patch_done); | |
149 __ bind(fast_patch); | |
150 } | |
151 #else | |
152 __ jmp(patch_done); | |
153 __ bind(fast_patch); | |
154 } | |
155 Label okay; | |
156 __ load_unsigned_byte(scratch, at_bcp(0)); | |
157 __ cmpl(scratch, (int) Bytecodes::java_code(bytecode)); | |
158 __ jcc(Assembler::equal, okay); | |
159 __ cmpl(scratch, bc); | |
160 __ jcc(Assembler::equal, okay); | |
161 __ stop("patching the wrong bytecode"); | |
162 __ bind(okay); | |
163 #endif | |
164 // patch bytecode | |
165 __ movb(at_bcp(0), bc); | |
166 __ bind(patch_done); | |
167 } | |
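// Note: in debug (ASSERT) builds the fast-patch path above first verifies
// that the byte about to be overwritten is either the original Java bytecode
// or the fast variant already installed before movb stores the new bytecode.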
168 | |
169 | |
170 // Individual instructions | |
171 | |
172 void TemplateTable::nop() { | |
173 transition(vtos, vtos); | |
174 // nothing to do | |
175 } | |
176 | |
177 void TemplateTable::shouldnotreachhere() { | |
178 transition(vtos, vtos); | |
179 __ stop("shouldnotreachhere bytecode"); | |
180 } | |
181 | |
182 void TemplateTable::aconst_null() { | |
183 transition(vtos, atos); | |
184 __ xorl(rax, rax); | |
185 } | |
186 | |
187 void TemplateTable::iconst(int value) { | |
188 transition(vtos, itos); | |
189 if (value == 0) { | |
190 __ xorl(rax, rax); | |
191 } else { | |
192 __ movl(rax, value); | |
193 } | |
194 } | |
195 | |
196 void TemplateTable::lconst(int value) { | |
197 transition(vtos, ltos); | |
198 if (value == 0) { | |
199 __ xorl(rax, rax); | |
200 } else { | |
201 __ movl(rax, value); | |
202 } | |
203 } | |
204 | |
205 void TemplateTable::fconst(int value) { | |
206 transition(vtos, ftos); | |
207 static float one = 1.0f, two = 2.0f; | |
208 switch (value) { | |
209 case 0: | |
210 __ xorps(xmm0, xmm0); | |
211 break; | |
212 case 1: | |
213 __ movflt(xmm0, ExternalAddress((address) &one)); | |
214 break; | |
215 case 2: | |
216 __ movflt(xmm0, ExternalAddress((address) &two)); | |
217 break; | |
218 default: | |
219 ShouldNotReachHere(); | |
220 break; | |
221 } | |
222 } | |
223 | |
224 void TemplateTable::dconst(int value) { | |
225 transition(vtos, dtos); | |
226 static double one = 1.0; | |
227 switch (value) { | |
228 case 0: | |
229 __ xorpd(xmm0, xmm0); | |
230 break; | |
231 case 1: | |
232 __ movdbl(xmm0, ExternalAddress((address) &one)); | |
233 break; | |
234 default: | |
235 ShouldNotReachHere(); | |
236 break; | |
237 } | |
238 } | |
239 | |
240 void TemplateTable::bipush() { | |
241 transition(vtos, itos); | |
242 __ load_signed_byte(rax, at_bcp(1)); | |
243 } | |
244 | |
245 void TemplateTable::sipush() { | |
246 transition(vtos, itos); | |
247 __ load_unsigned_word(rax, at_bcp(1)); | |
248 __ bswapl(rax); | |
249 __ sarl(rax, 16); | |
250 } | |
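// The 16-bit operand sits big-endian in the bytecode stream, so the
// little-endian word load arrives byte-swapped; bswapl moves it into the
// high half of rax and sarl(16) brings it back down with sign extension.
// For example, operand 0x1234 (bytes 0x12 0x34 at bcp+1) loads as 0x3412,
// becomes 0x12340000 after bswapl and 0x00001234 after sarl(16).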
251 | |
252 void TemplateTable::ldc(bool wide) { | |
253 transition(vtos, vtos); | |
254 Label call_ldc, notFloat, notClass, Done; | |
255 | |
256 if (wide) { | |
257 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); | |
258 } else { | |
259 __ load_unsigned_byte(rbx, at_bcp(1)); | |
260 } | |
261 | |
262 __ get_cpool_and_tags(rcx, rax); | |
263 const int base_offset = constantPoolOopDesc::header_size() * wordSize; | |
264 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; | |
265 | |
266 // get type | |
267 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset)); | |
268 | |
269 // unresolved string - get the resolved string | |
270 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString); | |
271 __ jccb(Assembler::equal, call_ldc); | |
272 | |
273 // unresolved class - get the resolved class | |
274 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass); | |
275 __ jccb(Assembler::equal, call_ldc); | |
276 | |
277 // unresolved class in error state - call into runtime to throw the error | |
278 // from the first resolution attempt | |
279 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError); | |
280 __ jccb(Assembler::equal, call_ldc); | |
281 | |
282 // resolved class - need to call vm to get java mirror of the class | |
283 __ cmpl(rdx, JVM_CONSTANT_Class); | |
284 __ jcc(Assembler::notEqual, notClass); | |
285 | |
286 __ bind(call_ldc); | |
287 __ movl(c_rarg1, wide); | |
288 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1); | |
289 __ push_ptr(rax); | |
290 __ verify_oop(rax); | |
291 __ jmp(Done); | |
292 | |
293 __ bind(notClass); | |
294 __ cmpl(rdx, JVM_CONSTANT_Float); | |
295 __ jccb(Assembler::notEqual, notFloat); | |
296 // ftos | |
297 __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset)); | |
298 __ push_f(); | |
299 __ jmp(Done); | |
300 | |
301 __ bind(notFloat); | |
302 #ifdef ASSERT | |
303 { | |
304 Label L; | |
305 __ cmpl(rdx, JVM_CONSTANT_Integer); | |
306 __ jcc(Assembler::equal, L); | |
307 __ cmpl(rdx, JVM_CONSTANT_String); | |
308 __ jcc(Assembler::equal, L); | |
309 __ stop("unexpected tag type in ldc"); | |
310 __ bind(L); | |
311 } | |
312 #endif | |
313 // atos and itos | |
314 Label isOop; | |
315 __ cmpl(rdx, JVM_CONSTANT_Integer); | |
316 __ jcc(Assembler::notEqual, isOop); | |
317 __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset)); | |
318 __ push_i(rax); | |
319 __ jmp(Done); | |
320 | |
321 __ bind(isOop); | |
304 | 322 __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset)); |
0 | 323 __ push_ptr(rax); |
324 | |
325 if (VerifyOops) { | |
326 __ verify_oop(rax); | |
327 } | |
328 | |
329 __ bind(Done); | |
330 } | |
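// Note: get_cpool_and_tags leaves the constantPoolOop in rcx and its tags
// array in rax; pool entries are one machine word apiece on amd64, hence the
// Address::times_8 scaling from the index in rbx, while the tag lookup uses
// times_1 into the byte array.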
331 | |
332 void TemplateTable::ldc2_w() { | |
333 transition(vtos, vtos); | |
334 Label Long, Done; | |
335 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); | |
336 | |
337 __ get_cpool_and_tags(rcx, rax); | |
338 const int base_offset = constantPoolOopDesc::header_size() * wordSize; | |
339 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; | |
340 | |
341 // get type | |
342 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), | |
343 JVM_CONSTANT_Double); | |
344 __ jccb(Assembler::notEqual, Long); | |
345 // dtos | |
346 __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset)); | |
347 __ push_d(); | |
348 __ jmpb(Done); | |
349 | |
350 __ bind(Long); | |
351 // ltos | |
352 __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset)); | |
353 __ push_l(); | |
354 | |
355 __ bind(Done); | |
356 } | |
357 | |
358 void TemplateTable::locals_index(Register reg, int offset) { | |
359 __ load_unsigned_byte(reg, at_bcp(offset)); | |
304 | 360 __ negptr(reg); |
361 if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2 | |
0 | 362 } |
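// Note: the index is negated because locals sit at decreasing addresses
// below the locals pointer r14; with the times_8 scaling in
// iaddress(Register) the effective address becomes r14 - index*8 (plus the
// value offset).  Under the TaggedStackInterpreter each slot is a
// (tag, value) pair, hence the extra doubling of the index.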
363 | |
364 void TemplateTable::iload() { | |
365 transition(vtos, itos); | |
366 if (RewriteFrequentPairs) { | |
367 Label rewrite, done; | |
368 const Register bc = c_rarg3; | |
369 assert(rbx != bc, "register damaged"); | |
370 | |
371 // get next byte | |
372 __ load_unsigned_byte(rbx, | |
373 at_bcp(Bytecodes::length_for(Bytecodes::_iload))); | |
374 // if _iload, wait to rewrite to iload2. We only want to rewrite the | |
375 // last two iloads in a pair. Comparing against fast_iload means that | |
376 // the next bytecode is neither an iload nor a caload, and therefore | |
377 // an iload pair. | |
378 __ cmpl(rbx, Bytecodes::_iload); | |
379 __ jcc(Assembler::equal, done); | |
380 | |
381 __ cmpl(rbx, Bytecodes::_fast_iload); | |
382 __ movl(bc, Bytecodes::_fast_iload2); | |
383 __ jccb(Assembler::equal, rewrite); | |
384 | |
385 // if _caload, rewrite to fast_icaload | |
386 __ cmpl(rbx, Bytecodes::_caload); | |
387 __ movl(bc, Bytecodes::_fast_icaload); | |
388 __ jccb(Assembler::equal, rewrite); | |
389 | |
390 // rewrite so iload doesn't check again. | |
391 __ movl(bc, Bytecodes::_fast_iload); | |
392 | |
393 // rewrite | |
394 // bc: fast bytecode | |
395 __ bind(rewrite); | |
396 patch_bytecode(Bytecodes::_iload, bc, rbx, false); | |
397 __ bind(done); | |
398 } | |
399 | |
400 // Get the local value into tos | |
401 locals_index(rbx); | |
402 __ movl(rax, iaddress(rbx)); | |
403 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
404 } | |
405 | |
406 void TemplateTable::fast_iload2() { | |
407 transition(vtos, itos); | |
408 locals_index(rbx); | |
409 __ movl(rax, iaddress(rbx)); | |
410 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
411 __ push(itos); | |
412 locals_index(rbx, 3); | |
413 __ movl(rax, iaddress(rbx)); | |
414 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
415 } | |
416 | |
417 void TemplateTable::fast_iload() { | |
418 transition(vtos, itos); | |
419 locals_index(rbx); | |
420 __ movl(rax, iaddress(rbx)); | |
421 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
422 } | |
423 | |
424 void TemplateTable::lload() { | |
425 transition(vtos, ltos); | |
426 locals_index(rbx); | |
427 __ movq(rax, laddress(rbx)); | |
428 debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); | |
429 } | |
430 | |
431 void TemplateTable::fload() { | |
432 transition(vtos, ftos); | |
433 locals_index(rbx); | |
434 __ movflt(xmm0, faddress(rbx)); | |
435 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
436 } | |
437 | |
438 void TemplateTable::dload() { | |
439 transition(vtos, dtos); | |
440 locals_index(rbx); | |
441 __ movdbl(xmm0, daddress(rbx)); | |
442 debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); | |
443 } | |
444 | |
445 void TemplateTable::aload() { | |
446 transition(vtos, atos); | |
447 locals_index(rbx); | |
304 | 448 __ movptr(rax, aaddress(rbx)); |
0 | 449 debug_only(__ verify_local_tag(frame::TagReference, rbx)); |
450 } | |
451 | |
452 void TemplateTable::locals_index_wide(Register reg) { | |
453 __ movl(reg, at_bcp(2)); | |
454 __ bswapl(reg); | |
455 __ shrl(reg, 16); | |
304 | 456 __ negptr(reg); |
457 if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2 | |
0 | 458 } |
459 | |
460 void TemplateTable::wide_iload() { | |
461 transition(vtos, itos); | |
462 locals_index_wide(rbx); | |
463 __ movl(rax, iaddress(rbx)); | |
464 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
465 } | |
466 | |
467 void TemplateTable::wide_lload() { | |
468 transition(vtos, ltos); | |
469 locals_index_wide(rbx); | |
470 __ movq(rax, laddress(rbx)); | |
471 debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); | |
472 } | |
473 | |
474 void TemplateTable::wide_fload() { | |
475 transition(vtos, ftos); | |
476 locals_index_wide(rbx); | |
477 __ movflt(xmm0, faddress(rbx)); | |
478 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
479 } | |
480 | |
481 void TemplateTable::wide_dload() { | |
482 transition(vtos, dtos); | |
483 locals_index_wide(rbx); | |
484 __ movdbl(xmm0, daddress(rbx)); | |
485 debug_only(__ verify_local_tag(frame::TagCategory2, rbx)); | |
486 } | |
487 | |
488 void TemplateTable::wide_aload() { | |
489 transition(vtos, atos); | |
490 locals_index_wide(rbx); | |
304 | 491 __ movptr(rax, aaddress(rbx)); |
0 | 492 debug_only(__ verify_local_tag(frame::TagReference, rbx)); |
493 } | |
494 | |
495 void TemplateTable::index_check(Register array, Register index) { | |
496 // destroys rbx | |
497 // check array | |
498 __ null_check(array, arrayOopDesc::length_offset_in_bytes()); | |
499 // sign extend index for use by indexed load | |
304 | 500 __ movl2ptr(index, index); |
0 | 501 // check index |
502 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes())); | |
503 if (index != rbx) { | |
504 // ??? convention: move aberrant index into ebx for exception message | |
505 assert(rbx != array, "different registers"); | |
506 __ movl(rbx, index); | |
507 } | |
508 __ jump_cc(Assembler::aboveEqual, | |
509 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry)); | |
510 } | |
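// Note: on the fall-through path the index is also left in rbx (copied above
// for the exception message); templates such as laload rely on this.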
511 | |
512 void TemplateTable::iaload() { | |
513 transition(itos, itos); | |
514 __ pop_ptr(rdx); | |
515 // eax: index | |
516 // rdx: array | |
517 index_check(rdx, rax); // kills rbx | |
518 __ movl(rax, Address(rdx, rax, | |
519 Address::times_4, | |
520 arrayOopDesc::base_offset_in_bytes(T_INT))); | |
521 } | |
522 | |
523 void TemplateTable::laload() { | |
524 transition(itos, ltos); | |
525 __ pop_ptr(rdx); | |
526 // eax: index | |
527 // rdx: array | |
528 index_check(rdx, rax); // kills rbx | |
529 __ movq(rax, Address(rdx, rbx, | |
530 Address::times_8, | |
531 arrayOopDesc::base_offset_in_bytes(T_LONG))); | |
532 } | |
533 | |
534 void TemplateTable::faload() { | |
535 transition(itos, ftos); | |
536 __ pop_ptr(rdx); | |
537 // eax: index | |
538 // rdx: array | |
539 index_check(rdx, rax); // kills rbx | |
540 __ movflt(xmm0, Address(rdx, rax, | |
541 Address::times_4, | |
542 arrayOopDesc::base_offset_in_bytes(T_FLOAT))); | |
543 } | |
544 | |
545 void TemplateTable::daload() { | |
546 transition(itos, dtos); | |
547 __ pop_ptr(rdx); | |
548 // eax: index | |
549 // rdx: array | |
550 index_check(rdx, rax); // kills rbx | |
551 __ movdbl(xmm0, Address(rdx, rax, | |
552 Address::times_8, | |
553 arrayOopDesc::base_offset_in_bytes(T_DOUBLE))); | |
554 } | |
555 | |
556 void TemplateTable::aaload() { | |
557 transition(itos, atos); | |
558 __ pop_ptr(rdx); | |
559 // eax: index | |
560 // rdx: array | |
561 index_check(rdx, rax); // kills rbx | |
113 | 562 __ load_heap_oop(rax, Address(rdx, rax, |
563 UseCompressedOops ? Address::times_4 : Address::times_8, |
0 | 564 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); |
565 } | |
566 | |
567 void TemplateTable::baload() { | |
568 transition(itos, itos); | |
569 __ pop_ptr(rdx); | |
570 // eax: index | |
571 // rdx: array | |
572 index_check(rdx, rax); // kills rbx | |
573 __ load_signed_byte(rax, | |
574 Address(rdx, rax, | |
575 Address::times_1, | |
576 arrayOopDesc::base_offset_in_bytes(T_BYTE))); | |
577 } | |
578 | |
579 void TemplateTable::caload() { | |
580 transition(itos, itos); | |
581 __ pop_ptr(rdx); | |
582 // eax: index | |
583 // rdx: array | |
584 index_check(rdx, rax); // kills rbx | |
585 __ load_unsigned_word(rax, | |
586 Address(rdx, rax, | |
587 Address::times_2, | |
588 arrayOopDesc::base_offset_in_bytes(T_CHAR))); | |
589 } | |
590 | |
591 // iload followed by caload frequent pair | |
592 void TemplateTable::fast_icaload() { | |
593 transition(vtos, itos); | |
594 // load index out of locals | |
595 locals_index(rbx); | |
596 __ movl(rax, iaddress(rbx)); | |
597 debug_only(__ verify_local_tag(frame::TagValue, rbx)); | |
598 | |
599 // eax: index | |
600 // rdx: array | |
601 __ pop_ptr(rdx); | |
602 index_check(rdx, rax); // kills rbx | |
603 __ load_unsigned_word(rax, | |
604 Address(rdx, rax, | |
605 Address::times_2, | |
606 arrayOopDesc::base_offset_in_bytes(T_CHAR))); | |
607 } | |
608 | |
609 void TemplateTable::saload() { | |
610 transition(itos, itos); | |
611 __ pop_ptr(rdx); | |
612 // eax: index | |
613 // rdx: array | |
614 index_check(rdx, rax); // kills rbx | |
615 __ load_signed_word(rax, | |
616 Address(rdx, rax, | |
617 Address::times_2, | |
618 arrayOopDesc::base_offset_in_bytes(T_SHORT))); | |
619 } | |
620 | |
621 void TemplateTable::iload(int n) { | |
622 transition(vtos, itos); | |
623 __ movl(rax, iaddress(n)); | |
624 debug_only(__ verify_local_tag(frame::TagValue, n)); | |
625 } | |
626 | |
627 void TemplateTable::lload(int n) { | |
628 transition(vtos, ltos); | |
629 __ movq(rax, laddress(n)); | |
630 debug_only(__ verify_local_tag(frame::TagCategory2, n)); | |
631 } | |
632 | |
633 void TemplateTable::fload(int n) { | |
634 transition(vtos, ftos); | |
635 __ movflt(xmm0, faddress(n)); | |
636 debug_only(__ verify_local_tag(frame::TagValue, n)); | |
637 } | |
638 | |
639 void TemplateTable::dload(int n) { | |
640 transition(vtos, dtos); | |
641 __ movdbl(xmm0, daddress(n)); | |
642 debug_only(__ verify_local_tag(frame::TagCategory2, n)); | |
643 } | |
644 | |
645 void TemplateTable::aload(int n) { | |
646 transition(vtos, atos); | |
304 | 647 __ movptr(rax, aaddress(n)); |
0 | 648 debug_only(__ verify_local_tag(frame::TagReference, n)); |
649 } | |
650 | |
651 void TemplateTable::aload_0() { | |
652 transition(vtos, atos); | |
653 // According to bytecode histograms, the pairs: | |
654 // | |
655 // _aload_0, _fast_igetfield | |
656 // _aload_0, _fast_agetfield | |
657 // _aload_0, _fast_fgetfield | |
658 // | |
659 // occur frequently. If RewriteFrequentPairs is set, the (slow) | |
660 // _aload_0 bytecode checks if the next bytecode is either | |
661 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then | |
662 // rewrites the current bytecode into a pair bytecode; otherwise it | |
663 // rewrites the current bytecode into _fast_aload_0 that doesn't do | |
664 // the pair check anymore. | |
665 // | |
666 // Note: If the next bytecode is _getfield, the rewrite must be | |
667 // delayed, otherwise we may miss an opportunity for a pair. | |
668 // | |
669 // Also rewrite frequent pairs | |
670 // aload_0, aload_1 | |
671 // aload_0, iload_1 | |
672 // These bytecodes with a small amount of code are most profitable | |
673 // to rewrite | |
674 if (RewriteFrequentPairs) { | |
675 Label rewrite, done; | |
676 const Register bc = c_rarg3; | |
677 assert(rbx != bc, "register damaged"); | |
678 // get next byte | |
679 __ load_unsigned_byte(rbx, | |
680 at_bcp(Bytecodes::length_for(Bytecodes::_aload_0))); | |
681 | |
682 // do actual aload_0 | |
683 aload(0); | |
684 | |
685 // if _getfield then wait with rewrite | |
686 __ cmpl(rbx, Bytecodes::_getfield); | |
687 __ jcc(Assembler::equal, done); | |
688 | |
689 // if _igetfield then rewrite to _fast_iaccess_0 | |
690 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == | |
691 Bytecodes::_aload_0, | |
692 "fix bytecode definition"); | |
693 __ cmpl(rbx, Bytecodes::_fast_igetfield); | |
694 __ movl(bc, Bytecodes::_fast_iaccess_0); | |
695 __ jccb(Assembler::equal, rewrite); | |
696 | |
697 // if _agetfield then rewrite to _fast_aaccess_0 | |
698 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == | |
699 Bytecodes::_aload_0, | |
700 "fix bytecode definition"); | |
701 __ cmpl(rbx, Bytecodes::_fast_agetfield); | |
702 __ movl(bc, Bytecodes::_fast_aaccess_0); | |
703 __ jccb(Assembler::equal, rewrite); | |
704 | |
705 // if _fgetfield then rewrite to _fast_faccess_0 | |
706 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == | |
707 Bytecodes::_aload_0, | |
708 "fix bytecode definition"); | |
709 __ cmpl(rbx, Bytecodes::_fast_fgetfield); | |
710 __ movl(bc, Bytecodes::_fast_faccess_0); | |
711 __ jccb(Assembler::equal, rewrite); | |
712 | |
713 // else rewrite to _fast_aload_0 | |
714 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == | |
715 Bytecodes::_aload_0, | |
716 "fix bytecode definition"); | |
717 __ movl(bc, Bytecodes::_fast_aload_0); | |
718 | |
719 // rewrite | |
720 // bc: fast bytecode | |
721 __ bind(rewrite); | |
722 patch_bytecode(Bytecodes::_aload_0, bc, rbx, false); | |
723 | |
724 __ bind(done); | |
725 } else { | |
726 aload(0); | |
727 } | |
728 } | |
729 | |
730 void TemplateTable::istore() { | |
731 transition(itos, vtos); | |
732 locals_index(rbx); | |
733 __ movl(iaddress(rbx), rax); | |
734 __ tag_local(frame::TagValue, rbx); | |
735 } | |
736 | |
737 void TemplateTable::lstore() { | |
738 transition(ltos, vtos); | |
739 locals_index(rbx); | |
740 __ movq(laddress(rbx), rax); | |
741 __ tag_local(frame::TagCategory2, rbx); | |
742 } | |
743 | |
744 void TemplateTable::fstore() { | |
745 transition(ftos, vtos); | |
746 locals_index(rbx); | |
747 __ movflt(faddress(rbx), xmm0); | |
748 __ tag_local(frame::TagValue, rbx); | |
749 } | |
750 | |
751 void TemplateTable::dstore() { | |
752 transition(dtos, vtos); | |
753 locals_index(rbx); | |
754 __ movdbl(daddress(rbx), xmm0); | |
755 __ tag_local(frame::TagCategory2, rbx); | |
756 } | |
757 | |
758 void TemplateTable::astore() { | |
759 transition(vtos, vtos); | |
760 __ pop_ptr(rax, rdx); // will need to pop tag too | |
761 locals_index(rbx); | |
304 | 762 __ movptr(aaddress(rbx), rax); |
0 | 763 __ tag_local(rdx, rbx); // store tag from stack, might be returnAddr |
764 } | |
765 | |
766 void TemplateTable::wide_istore() { | |
767 transition(vtos, vtos); | |
768 __ pop_i(); | |
769 locals_index_wide(rbx); | |
770 __ movl(iaddress(rbx), rax); | |
771 __ tag_local(frame::TagValue, rbx); | |
772 } | |
773 | |
774 void TemplateTable::wide_lstore() { | |
775 transition(vtos, vtos); | |
776 __ pop_l(); | |
777 locals_index_wide(rbx); | |
778 __ movq(laddress(rbx), rax); | |
779 __ tag_local(frame::TagCategory2, rbx); | |
780 } | |
781 | |
782 void TemplateTable::wide_fstore() { | |
783 transition(vtos, vtos); | |
784 __ pop_f(); | |
785 locals_index_wide(rbx); | |
786 __ movflt(faddress(rbx), xmm0); | |
787 __ tag_local(frame::TagValue, rbx); | |
788 } | |
789 | |
790 void TemplateTable::wide_dstore() { | |
791 transition(vtos, vtos); | |
792 __ pop_d(); | |
793 locals_index_wide(rbx); | |
794 __ movdbl(daddress(rbx), xmm0); | |
795 __ tag_local(frame::TagCategory2, rbx); | |
796 } | |
797 | |
798 void TemplateTable::wide_astore() { | |
799 transition(vtos, vtos); | |
800 __ pop_ptr(rax, rdx); // will need to pop tag too | |
801 locals_index_wide(rbx); | |
304 | 802 __ movptr(aaddress(rbx), rax); |
0 | 803 __ tag_local(rdx, rbx); // store tag from stack, might be returnAddr |
804 } | |
805 | |
806 void TemplateTable::iastore() { | |
807 transition(itos, vtos); | |
808 __ pop_i(rbx); | |
809 __ pop_ptr(rdx); | |
810 // eax: value | |
811 // ebx: index | |
812 // rdx: array | |
813 index_check(rdx, rbx); // prefer index in ebx | |
814 __ movl(Address(rdx, rbx, | |
815 Address::times_4, | |
816 arrayOopDesc::base_offset_in_bytes(T_INT)), | |
817 rax); | |
818 } | |
819 | |
820 void TemplateTable::lastore() { | |
821 transition(ltos, vtos); | |
822 __ pop_i(rbx); | |
823 __ pop_ptr(rdx); | |
824 // rax: value | |
825 // ebx: index | |
826 // rdx: array | |
827 index_check(rdx, rbx); // prefer index in ebx | |
828 __ movq(Address(rdx, rbx, | |
829 Address::times_8, | |
830 arrayOopDesc::base_offset_in_bytes(T_LONG)), | |
831 rax); | |
832 } | |
833 | |
834 void TemplateTable::fastore() { | |
835 transition(ftos, vtos); | |
836 __ pop_i(rbx); | |
837 __ pop_ptr(rdx); | |
838 // xmm0: value | |
839 // ebx: index | |
840 // rdx: array | |
841 index_check(rdx, rbx); // prefer index in ebx | |
842 __ movflt(Address(rdx, rbx, | |
843 Address::times_4, | |
844 arrayOopDesc::base_offset_in_bytes(T_FLOAT)), | |
845 xmm0); | |
846 } | |
847 | |
848 void TemplateTable::dastore() { | |
849 transition(dtos, vtos); | |
850 __ pop_i(rbx); | |
851 __ pop_ptr(rdx); | |
852 // xmm0: value | |
853 // ebx: index | |
854 // rdx: array | |
855 index_check(rdx, rbx); // prefer index in ebx | |
856 __ movdbl(Address(rdx, rbx, | |
857 Address::times_8, | |
858 arrayOopDesc::base_offset_in_bytes(T_DOUBLE)), | |
859 xmm0); | |
860 } | |
861 | |
862 void TemplateTable::aastore() { | |
863 Label is_null, ok_is_subtype, done; | |
864 transition(vtos, vtos); | |
865 // stack: ..., array, index, value | |
304 | 866 __ movptr(rax, at_tos()); // value |
0 | 867 __ movl(rcx, at_tos_p1()); // index |
304 | 868 __ movptr(rdx, at_tos_p2()); // array |
0 | 869 index_check(rdx, rcx); // kills rbx |
870 // do array store check - check for NULL value first | |
304 | 871 __ testptr(rax, rax); |
0 | 872 __ jcc(Assembler::zero, is_null); |
873 | |
874 // Move subklass into rbx | |
113 | 875 __ load_klass(rbx, rax); |
0 | 876 // Move superklass into rax |
113 | 877 __ load_klass(rax, rdx); |
304 | 878 __ movptr(rax, Address(rax, |
879 sizeof(oopDesc) + | |
880 objArrayKlass::element_klass_offset_in_bytes())); | |
113 | 881 // Compress array + index*oopSize + 12 into a single register. Frees rcx. |
304 | 882 __ lea(rdx, Address(rdx, rcx, |
883 UseCompressedOops ? Address::times_4 : Address::times_8, | |
884 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); | |
0 | 885 |
886 // Generate subtype check. Blows rcx, rdi | |
887 // Superklass in rax. Subklass in rbx. | |
888 __ gen_subtype_check(rbx, ok_is_subtype); | |
889 | |
890 // Come here on failure | |
891 // object is at TOS | |
892 __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry)); | |
893 | |
894 // Come here on success | |
895 __ bind(ok_is_subtype); | |
304 | 896 __ movptr(rax, at_tos()); // Value |
113 | 897 __ store_heap_oop(Address(rdx, 0), rax); |
0 | 898 __ store_check(rdx); |
899 __ jmp(done); | |
900 | |
901 // Have a NULL in rax, rdx=array, ecx=index. Store NULL at ary[idx] | |
902 __ bind(is_null); | |
903 __ profile_null_seen(rbx); | |
113 | 904 __ store_heap_oop(Address(rdx, rcx, |
905 UseCompressedOops ? Address::times_4 : Address::times_8, |
906 arrayOopDesc::base_offset_in_bytes(T_OBJECT)), |
907 rax); |
0 | 908 |
909 // Pop stack arguments | |
910 __ bind(done); | |
304 | 911 __ addptr(rsp, 3 * Interpreter::stackElementSize()); |
0 | 912 } |
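// Note: the card-mark barrier (store_check) is only emitted on the non-null
// path; storing NULL cannot create a cross-generation reference, so the
// is_null path skips both the subtype check and the barrier.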
913 | |
914 void TemplateTable::bastore() { | |
915 transition(itos, vtos); | |
916 __ pop_i(rbx); | |
917 __ pop_ptr(rdx); | |
918 // eax: value | |
919 // ebx: index | |
920 // rdx: array | |
921 index_check(rdx, rbx); // prefer index in ebx | |
922 __ movb(Address(rdx, rbx, | |
923 Address::times_1, | |
924 arrayOopDesc::base_offset_in_bytes(T_BYTE)), | |
925 rax); | |
926 } | |
927 | |
928 void TemplateTable::castore() { | |
929 transition(itos, vtos); | |
930 __ pop_i(rbx); | |
931 __ pop_ptr(rdx); | |
932 // eax: value | |
933 // ebx: index | |
934 // rdx: array | |
935 index_check(rdx, rbx); // prefer index in ebx | |
936 __ movw(Address(rdx, rbx, | |
937 Address::times_2, | |
938 arrayOopDesc::base_offset_in_bytes(T_CHAR)), | |
939 rax); | |
940 } | |
941 | |
942 void TemplateTable::sastore() { | |
943 castore(); | |
944 } | |
945 | |
946 void TemplateTable::istore(int n) { | |
947 transition(itos, vtos); | |
948 __ movl(iaddress(n), rax); | |
949 __ tag_local(frame::TagValue, n); | |
950 } | |
951 | |
952 void TemplateTable::lstore(int n) { | |
953 transition(ltos, vtos); | |
954 __ movq(laddress(n), rax); | |
955 __ tag_local(frame::TagCategory2, n); | |
956 } | |
957 | |
958 void TemplateTable::fstore(int n) { | |
959 transition(ftos, vtos); | |
960 __ movflt(faddress(n), xmm0); | |
961 __ tag_local(frame::TagValue, n); | |
962 } | |
963 | |
964 void TemplateTable::dstore(int n) { | |
965 transition(dtos, vtos); | |
966 __ movdbl(daddress(n), xmm0); | |
967 __ tag_local(frame::TagCategory2, n); | |
968 } | |
969 | |
970 void TemplateTable::astore(int n) { | |
971 transition(vtos, vtos); | |
972 __ pop_ptr(rax, rdx); | |
304 | 973 __ movptr(aaddress(n), rax); |
0 | 974 __ tag_local(rdx, n); |
975 } | |
976 | |
977 void TemplateTable::pop() { | |
978 transition(vtos, vtos); | |
304 | 979 __ addptr(rsp, Interpreter::stackElementSize()); |
0 | 980 } |
981 | |
982 void TemplateTable::pop2() { | |
983 transition(vtos, vtos); | |
304 | 984 __ addptr(rsp, 2 * Interpreter::stackElementSize()); |
0 | 985 } |
986 | |
987 void TemplateTable::dup() { | |
988 transition(vtos, vtos); | |
989 __ load_ptr_and_tag(0, rax, rdx); | |
990 __ push_ptr(rax, rdx); | |
991 // stack: ..., a, a | |
992 } | |
993 | |
994 void TemplateTable::dup_x1() { | |
995 transition(vtos, vtos); | |
996 // stack: ..., a, b | |
997 __ load_ptr_and_tag(0, rax, rdx); // load b | |
998 __ load_ptr_and_tag(1, rcx, rbx); // load a | |
999 __ store_ptr_and_tag(1, rax, rdx); // store b | |
1000 __ store_ptr_and_tag(0, rcx, rbx); // store a | |
1001 __ push_ptr(rax, rdx); // push b | |
1002 // stack: ..., b, a, b | |
1003 } | |
1004 | |
1005 void TemplateTable::dup_x2() { | |
1006 transition(vtos, vtos); | |
1007 // stack: ..., a, b, c | |
1008 __ load_ptr_and_tag(0, rax, rdx); // load c | |
1009 __ load_ptr_and_tag(2, rcx, rbx); // load a | |
1010 __ store_ptr_and_tag(2, rax, rdx); // store c in a | |
1011 __ push_ptr(rax, rdx); // push c | |
1012 // stack: ..., c, b, c, c | |
1013 __ load_ptr_and_tag(2, rax, rdx); // load b | |
1014 __ store_ptr_and_tag(2, rcx, rbx); // store a in b | |
1015 // stack: ..., c, a, c, c | |
1016 __ store_ptr_and_tag(1, rax, rdx); // store b in c | |
1017 // stack: ..., c, a, b, c | |
1018 } | |
1019 | |
1020 void TemplateTable::dup2() { | |
1021 transition(vtos, vtos); | |
1022 // stack: ..., a, b | |
1023 __ load_ptr_and_tag(1, rax, rdx); // load a | |
1024 __ push_ptr(rax, rdx); // push a | |
1025 __ load_ptr_and_tag(1, rax, rdx); // load b | |
1026 __ push_ptr(rax, rdx); // push b | |
1027 // stack: ..., a, b, a, b | |
1028 } | |
1029 | |
1030 void TemplateTable::dup2_x1() { | |
1031 transition(vtos, vtos); | |
1032 // stack: ..., a, b, c | |
1033 __ load_ptr_and_tag(0, rcx, rbx); // load c | |
1034 __ load_ptr_and_tag(1, rax, rdx); // load b | |
1035 __ push_ptr(rax, rdx); // push b | |
1036 __ push_ptr(rcx, rbx); // push c | |
1037 // stack: ..., a, b, c, b, c | |
1038 __ store_ptr_and_tag(3, rcx, rbx); // store c in b | |
1039 // stack: ..., a, c, c, b, c | |
1040 __ load_ptr_and_tag(4, rcx, rbx); // load a | |
1041 __ store_ptr_and_tag(2, rcx, rbx); // store a in 2nd c | |
1042 // stack: ..., a, c, a, b, c | |
1043 __ store_ptr_and_tag(4, rax, rdx); // store b in a | |
1044 // stack: ..., b, c, a, b, c | |
1045 } | |
1046 | |
1047 void TemplateTable::dup2_x2() { | |
1048 transition(vtos, vtos); | |
1049 // stack: ..., a, b, c, d | |
1050 __ load_ptr_and_tag(0, rcx, rbx); // load d | |
1051 __ load_ptr_and_tag(1, rax, rdx); // load c | |
1052 __ push_ptr(rax, rdx); // push c | |
1053 __ push_ptr(rcx, rbx); // push d | |
1054 // stack: ..., a, b, c, d, c, d | |
1055 __ load_ptr_and_tag(4, rax, rdx); // load b | |
1056 __ store_ptr_and_tag(2, rax, rdx); // store b in d | |
1057 __ store_ptr_and_tag(4, rcx, rbx); // store d in b | |
1058 // stack: ..., a, d, c, b, c, d | |
1059 __ load_ptr_and_tag(5, rcx, rbx); // load a | |
1060 __ load_ptr_and_tag(3, rax, rdx); // load c | |
1061 __ store_ptr_and_tag(3, rcx, rbx); // store a in c | |
1062 __ store_ptr_and_tag(5, rax, rdx); // store c in a | |
1063 // stack: ..., c, d, a, b, c, d | |
1064 } | |
1065 | |
1066 void TemplateTable::swap() { | |
1067 transition(vtos, vtos); | |
1068 // stack: ..., a, b | |
1069 __ load_ptr_and_tag(1, rcx, rbx); // load a | |
1070 __ load_ptr_and_tag(0, rax, rdx); // load b | |
1071 __ store_ptr_and_tag(0, rcx, rbx); // store a in b | |
1072 __ store_ptr_and_tag(1, rax, rdx); // store b in a | |
1073 // stack: ..., b, a | |
1074 } | |
1075 | |
1076 void TemplateTable::iop2(Operation op) { | |
1077 transition(itos, itos); | |
1078 switch (op) { | |
1079 case add : __ pop_i(rdx); __ addl (rax, rdx); break; | |
1080 case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break; | |
1081 case mul : __ pop_i(rdx); __ imull(rax, rdx); break; | |
1082 case _and : __ pop_i(rdx); __ andl (rax, rdx); break; | |
1083 case _or : __ pop_i(rdx); __ orl (rax, rdx); break; | |
1084 case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break; | |
1085 case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break; | |
1086 case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break; | |
1087 case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break; | |
1088 default : ShouldNotReachHere(); | |
1089 } | |
1090 } | |
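// Note: x86 variable shifts take their count in CL, so the shift cases move
// the count into rcx before popping the value to be shifted back into rax.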
1091 | |
1092 void TemplateTable::lop2(Operation op) { | |
1093 transition(ltos, ltos); | |
1094 switch (op) { | |
304 | 1095 case add : __ pop_l(rdx); __ addptr (rax, rdx); break; |
1096 case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr (rax, rdx); break; | |
1097 case _and : __ pop_l(rdx); __ andptr (rax, rdx); break; | |
1098 case _or : __ pop_l(rdx); __ orptr (rax, rdx); break; | |
1099 case _xor : __ pop_l(rdx); __ xorptr (rax, rdx); break; | |
0 | 1100 default : ShouldNotReachHere(); |
1101 } | |
1102 } | |
1103 | |
1104 void TemplateTable::idiv() { | |
1105 transition(itos, itos); | |
1106 __ movl(rcx, rax); | |
1107 __ pop_i(rax); | |
1108 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If | |
1109 // they are not equal, one could do a normal division (no correction | |
1110 // needed), which may speed up this implementation for the common case. | |
1111 // (see also JVM spec., p.243 & p.271) | |
1112 __ corrected_idivl(rcx); | |
1113 } | |
1114 | |
1115 void TemplateTable::irem() { | |
1116 transition(itos, itos); | |
1117 __ movl(rcx, rax); | |
1118 __ pop_i(rax); | |
1119 // Note: could xor eax and ecx and compare with (-1 ^ min_int). If | |
1120 // they are not equal, one could do a normal division (no correction | |
1121 // needed), which may speed up this implementation for the common case. | |
1122 // (see also JVM spec., p.243 & p.271) | |
1123 __ corrected_idivl(rcx); | |
1124 __ movl(rax, rdx); | |
1125 } | |
1126 | |
1127 void TemplateTable::lmul() { | |
1128 transition(ltos, ltos); | |
1129 __ pop_l(rdx); | |
1130 __ imulq(rax, rdx); | |
1131 } | |
1132 | |
1133 void TemplateTable::ldiv() { | |
1134 transition(ltos, ltos); | |
304 | 1135 __ mov(rcx, rax); |
0 | 1136 __ pop_l(rax); |
1137 // generate explicit div0 check | |
1138 __ testq(rcx, rcx); | |
1139 __ jump_cc(Assembler::zero, | |
1140 ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); | |
1141 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If | |
1142 // they are not equal, one could do a normal division (no correction | |
1143 // needed), which may speed up this implementation for the common case. | |
1144 // (see also JVM spec., p.243 & p.271) | |
1145 __ corrected_idivq(rcx); // kills rbx | |
1146 } | |
1147 | |
1148 void TemplateTable::lrem() { | |
1149 transition(ltos, ltos); | |
304 | 1150 __ mov(rcx, rax); |
0 | 1151 __ pop_l(rax); |
1152 __ testq(rcx, rcx); | |
1153 __ jump_cc(Assembler::zero, | |
1154 ExternalAddress(Interpreter::_throw_ArithmeticException_entry)); | |
1155 // Note: could xor rax and rcx and compare with (-1 ^ min_int). If | |
1156 // they are not equal, one could do a normal division (no correction | |
1157 // needed), which may speed up this implementation for the common case. | |
1158 // (see also JVM spec., p.243 & p.271) | |
1159 __ corrected_idivq(rcx); // kills rbx | |
304 | 1160 __ mov(rax, rdx); |
0 | 1161 } |
1162 | |
1163 void TemplateTable::lshl() { | |
1164 transition(itos, ltos); | |
1165 __ movl(rcx, rax); // get shift count | |
1166 __ pop_l(rax); // get shift value | |
1167 __ shlq(rax); | |
1168 } | |
1169 | |
1170 void TemplateTable::lshr() { | |
1171 transition(itos, ltos); | |
1172 __ movl(rcx, rax); // get shift count | |
1173 __ pop_l(rax); // get shift value | |
1174 __ sarq(rax); | |
1175 } | |
1176 | |
1177 void TemplateTable::lushr() { | |
1178 transition(itos, ltos); | |
1179 __ movl(rcx, rax); // get shift count | |
1180 __ pop_l(rax); // get shift value | |
1181 __ shrq(rax); | |
1182 } | |
1183 | |
1184 void TemplateTable::fop2(Operation op) { | |
1185 transition(ftos, ftos); | |
1186 switch (op) { | |
1187 case add: | |
1188 __ addss(xmm0, at_rsp()); | |
304 | 1189 __ addptr(rsp, Interpreter::stackElementSize()); |
0 | 1190 break; |
1191 case sub: | |
1192 __ movflt(xmm1, xmm0); | |
1193 __ pop_f(xmm0); | |
1194 __ subss(xmm0, xmm1); | |
1195 break; | |
1196 case mul: | |
1197 __ mulss(xmm0, at_rsp()); | |
304 | 1198 __ addptr(rsp, Interpreter::stackElementSize()); |
0 | 1199 break; |
1200 case div: | |
1201 __ movflt(xmm1, xmm0); | |
1202 __ pop_f(xmm0); | |
1203 __ divss(xmm0, xmm1); | |
1204 break; | |
1205 case rem: | |
1206 __ movflt(xmm1, xmm0); | |
1207 __ pop_f(xmm0); | |
1208 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2); | |
1209 break; | |
1210 default: | |
1211 ShouldNotReachHere(); | |
1212 break; | |
1213 } | |
1214 } | |
1215 | |
1216 void TemplateTable::dop2(Operation op) { | |
1217 transition(dtos, dtos); | |
1218 switch (op) { | |
1219 case add: | |
1220 __ addsd(xmm0, at_rsp()); | |
304 | 1221 __ addptr(rsp, 2 * Interpreter::stackElementSize()); |
0 | 1222 break; |
1223 case sub: | |
1224 __ movdbl(xmm1, xmm0); | |
1225 __ pop_d(xmm0); | |
1226 __ subsd(xmm0, xmm1); | |
1227 break; | |
1228 case mul: | |
1229 __ mulsd(xmm0, at_rsp()); | |
304 | 1230 __ addptr(rsp, 2 * Interpreter::stackElementSize()); |
0 | 1231 break; |
1232 case div: | |
1233 __ movdbl(xmm1, xmm0); | |
1234 __ pop_d(xmm0); | |
1235 __ divsd(xmm0, xmm1); | |
1236 break; | |
1237 case rem: | |
1238 __ movdbl(xmm1, xmm0); | |
1239 __ pop_d(xmm0); | |
1240 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2); | |
1241 break; | |
1242 default: | |
1243 ShouldNotReachHere(); | |
1244 break; | |
1245 } | |
1246 } | |
1247 | |
1248 void TemplateTable::ineg() { | |
1249 transition(itos, itos); | |
1250 __ negl(rax); | |
1251 } | |
1252 | |
1253 void TemplateTable::lneg() { | |
1254 transition(ltos, ltos); | |
1255 __ negq(rax); | |
1256 } | |
1257 | |
1258 // Note: 'double' and 'long long' have 32-bit alignment on x86. | |
1259 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { | |
1260 // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address | |
1261 // for 128-bit operands of SSE instructions. | |
1262 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF))); | |
1263 // Store the value to a 128-bit operand. | |
1264 operand[0] = lo; | |
1265 operand[1] = hi; | |
1266 return operand; | |
1267 } | |
1268 | |
1269 // Buffer for 128-bit masks used by SSE instructions. | |
1270 static jlong float_signflip_pool[2*2]; | |
1271 static jlong double_signflip_pool[2*2]; | |
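// Each pool is 4 jlongs (32 bytes), so masking &pool[1] down to a 16-byte
// boundary, as double_quadword does, always yields an aligned 16-byte slot
// that lies entirely within the pool, whatever the pool's own alignment.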
1272 | |
1273 void TemplateTable::fneg() { | |
1274 transition(ftos, ftos); | |
1275 static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000); | |
1276 __ xorps(xmm0, ExternalAddress((address) float_signflip)); | |
1277 } | |
1278 | |
1279 void TemplateTable::dneg() { | |
1280 transition(dtos, dtos); | |
1281 static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000); | |
1282 __ xorpd(xmm0, ExternalAddress((address) double_signflip)); | |
1283 } | |
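// xorps/xorpd with the sign mask flips only the IEEE sign bit, giving a
// single-instruction negation that also handles NaN and +/-0.0 correctly;
// e.g. 1.0f (0x3F800000) ^ 0x80000000 = 0xBF800000 = -1.0f.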
1284 | |
1285 void TemplateTable::iinc() { | |
1286 transition(vtos, vtos); | |
1287 __ load_signed_byte(rdx, at_bcp(2)); // get constant | |
1288 locals_index(rbx); | |
1289 __ addl(iaddress(rbx), rdx); | |
1290 } | |
1291 | |
1292 void TemplateTable::wide_iinc() { | |
1293 transition(vtos, vtos); | |
1294 __ movl(rdx, at_bcp(4)); // get constant | |
1295 locals_index_wide(rbx); | |
1296 __ bswapl(rdx); // swap bytes & sign-extend constant | |
1297 __ sarl(rdx, 16); | |
1298 __ addl(iaddress(rbx), rdx); | |
1299 // Note: should probably use only one movl to get both | |
1300 // the index and the constant -> fix this | |
1301 } | |
1302 | |
1303 void TemplateTable::convert() { | |
1304 // Checking | |
1305 #ifdef ASSERT | |
1306 { | |
1307 TosState tos_in = ilgl; | |
1308 TosState tos_out = ilgl; | |
1309 switch (bytecode()) { | |
1310 case Bytecodes::_i2l: // fall through | |
1311 case Bytecodes::_i2f: // fall through | |
1312 case Bytecodes::_i2d: // fall through | |
1313 case Bytecodes::_i2b: // fall through | |
1314 case Bytecodes::_i2c: // fall through | |
1315 case Bytecodes::_i2s: tos_in = itos; break; | |
1316 case Bytecodes::_l2i: // fall through | |
1317 case Bytecodes::_l2f: // fall through | |
1318 case Bytecodes::_l2d: tos_in = ltos; break; | |
1319 case Bytecodes::_f2i: // fall through | |
1320 case Bytecodes::_f2l: // fall through | |
1321 case Bytecodes::_f2d: tos_in = ftos; break; | |
1322 case Bytecodes::_d2i: // fall through | |
1323 case Bytecodes::_d2l: // fall through | |
1324 case Bytecodes::_d2f: tos_in = dtos; break; | |
1325 default : ShouldNotReachHere(); | |
1326 } | |
1327 switch (bytecode()) { | |
1328 case Bytecodes::_l2i: // fall through | |
1329 case Bytecodes::_f2i: // fall through | |
1330 case Bytecodes::_d2i: // fall through | |
1331 case Bytecodes::_i2b: // fall through | |
1332 case Bytecodes::_i2c: // fall through | |
1333 case Bytecodes::_i2s: tos_out = itos; break; | |
1334 case Bytecodes::_i2l: // fall through | |
1335 case Bytecodes::_f2l: // fall through | |
1336 case Bytecodes::_d2l: tos_out = ltos; break; | |
1337 case Bytecodes::_i2f: // fall through | |
1338 case Bytecodes::_l2f: // fall through | |
1339 case Bytecodes::_d2f: tos_out = ftos; break; | |
1340 case Bytecodes::_i2d: // fall through | |
1341 case Bytecodes::_l2d: // fall through | |
1342 case Bytecodes::_f2d: tos_out = dtos; break; | |
1343 default : ShouldNotReachHere(); | |
1344 } | |
1345 transition(tos_in, tos_out); | |
1346 } | |
1347 #endif // ASSERT | |
1348 | |
1349 static const int64_t is_nan = 0x8000000000000000L; | |
1350 | |
1351 // Conversion | |
1352 switch (bytecode()) { | |
1353 case Bytecodes::_i2l: | |
1354 __ movslq(rax, rax); | |
1355 break; | |
1356 case Bytecodes::_i2f: | |
1357 __ cvtsi2ssl(xmm0, rax); | |
1358 break; | |
1359 case Bytecodes::_i2d: | |
1360 __ cvtsi2sdl(xmm0, rax); | |
1361 break; | |
1362 case Bytecodes::_i2b: | |
1363 __ movsbl(rax, rax); | |
1364 break; | |
1365 case Bytecodes::_i2c: | |
1366 __ movzwl(rax, rax); | |
1367 break; | |
1368 case Bytecodes::_i2s: | |
1369 __ movswl(rax, rax); | |
1370 break; | |
1371 case Bytecodes::_l2i: | |
1372 __ movl(rax, rax); | |
1373 break; | |
1374 case Bytecodes::_l2f: | |
1375 __ cvtsi2ssq(xmm0, rax); | |
1376 break; | |
1377 case Bytecodes::_l2d: | |
1378 __ cvtsi2sdq(xmm0, rax); | |
1379 break; | |
1380 case Bytecodes::_f2i: | |
1381 { | |
1382 Label L; | |
1383 __ cvttss2sil(rax, xmm0); | |
1384 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? | |
1385 __ jcc(Assembler::notEqual, L); | |
1386 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); | |
1387 __ bind(L); | |
1388 } | |
1389 break; | |
1390 case Bytecodes::_f2l: | |
1391 { | |
1392 Label L; | |
1393 __ cvttss2siq(rax, xmm0); | |
1394 // NaN or overflow/underflow? | |
1395 __ cmp64(rax, ExternalAddress((address) &is_nan)); | |
1396 __ jcc(Assembler::notEqual, L); | |
1397 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); | |
1398 __ bind(L); | |
1399 } | |
1400 break; | |
1401 case Bytecodes::_f2d: | |
1402 __ cvtss2sd(xmm0, xmm0); | |
1403 break; | |
1404 case Bytecodes::_d2i: | |
1405 { | |
1406 Label L; | |
1407 __ cvttsd2sil(rax, xmm0); | |
1408 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? | |
1409 __ jcc(Assembler::notEqual, L); | |
1410 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1); | |
1411 __ bind(L); | |
1412 } | |
1413 break; | |
1414 case Bytecodes::_d2l: | |
1415 { | |
1416 Label L; | |
1417 __ cvttsd2siq(rax, xmm0); | |
1418 // NaN or overflow/underflow? | |
1419 __ cmp64(rax, ExternalAddress((address) &is_nan)); | |
1420 __ jcc(Assembler::notEqual, L); | |
1421 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1); | |
1422 __ bind(L); | |
1423 } | |
1424 break; | |
1425 case Bytecodes::_d2f: | |
1426 __ cvtsd2ss(xmm0, xmm0); | |
1427 break; | |
1428 default: | |
1429 ShouldNotReachHere(); | |
1430 } | |
1431 } | |
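// Note on the f2i/f2l/d2i/d2l cases above: cvttss2si/cvttsd2si return the
// "integer indefinite" value (0x80000000, or 0x8000000000000000 == is_nan
// for the 64-bit forms) for NaN and out-of-range inputs, so that sentinel
// diverts those inputs (and genuine MIN_VALUE results) to the SharedRuntime
// helpers, which compute the result required by the JVM specification.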
1432 | |
1433 void TemplateTable::lcmp() { | |
1434 transition(ltos, itos); | |
1435 Label done; | |
1436 __ pop_l(rdx); | |
1437 __ cmpq(rdx, rax); | |
1438 __ movl(rax, -1); | |
1439 __ jccb(Assembler::less, done); | |
1440 __ setb(Assembler::notEqual, rax); | |
1441 __ movzbl(rax, rax); | |
1442 __ bind(done); | |
1443 } | |
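// The not-less path is branch free: rax is preset to -1; otherwise
// setb(notEqual) yields 1 when the operands differ and 0 when they are
// equal, and movzbl widens that byte to the final int result.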
1444 | |
1445 void TemplateTable::float_cmp(bool is_float, int unordered_result) { | |
1446 Label done; | |
1447 if (is_float) { | |
1448 // XXX get rid of pop here, use ... reg, mem32 | |
1449 __ pop_f(xmm1); | |
1450 __ ucomiss(xmm1, xmm0); | |
1451 } else { | |
1452 // XXX get rid of pop here, use ... reg, mem64 | |
1453 __ pop_d(xmm1); | |
1454 __ ucomisd(xmm1, xmm0); | |
1455 } | |
1456 if (unordered_result < 0) { | |
1457 __ movl(rax, -1); | |
1458 __ jccb(Assembler::parity, done); | |
1459 __ jccb(Assembler::below, done); | |
1460 __ setb(Assembler::notEqual, rdx); | |
1461 __ movzbl(rax, rdx); | |
1462 } else { | |
1463 __ movl(rax, 1); | |
1464 __ jccb(Assembler::parity, done); | |
1465 __ jccb(Assembler::above, done); | |
1466 __ movl(rax, 0); | |
1467 __ jccb(Assembler::equal, done); | |
1468 __ decrementl(rax); | |
1469 } | |
1470 __ bind(done); | |
1471 } | |
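// unordered_result distinguishes the fcmpl/dcmpl (-1 on NaN) and
// fcmpg/dcmpg (+1 on NaN) flavors; ucomiss/ucomisd set the parity flag for
// unordered operands, which the parity branches above turn into that result.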
1472 | |
1473 void TemplateTable::branch(bool is_jsr, bool is_wide) { | |
1474 __ get_method(rcx); // rcx holds method | |
1475 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx | |
1476 // holds bumped taken count | |
1477 | |
1478 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + | |
1479 InvocationCounter::counter_offset(); | |
1480 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + | |
1481 InvocationCounter::counter_offset(); | |
1482 const int method_offset = frame::interpreter_frame_method_offset * wordSize; | |
1483 | |
1484 // Load up edx with the branch displacement | |
1485 __ movl(rdx, at_bcp(1)); | |
1486 __ bswapl(rdx); | |
1487 | |
1488 if (!is_wide) { | |
1489 __ sarl(rdx, 16); | |
1490 } | |
304 | 1491 __ movl2ptr(rdx, rdx); |
0 | 1492 |
1493 // Handle all the JSR stuff here, then exit. | |
1494 // It's much shorter and cleaner than intermingling with the non-JSR | |
1495 // normal-branch stuff occurring below. | |
1496 if (is_jsr) { | |
1497 // Pre-load the next target bytecode into rbx | |
1498 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0)); | |
1499 | |
1500 // compute return address as bci in rax | |
304 | 1501 __ lea(rax, at_bcp((is_wide ? 5 : 3) - |
0 | 1502 in_bytes(constMethodOopDesc::codes_offset()))); |
304 | 1503 __ subptr(rax, Address(rcx, methodOopDesc::const_offset())); |
0 | 1504 // Adjust the bcp in r13 by the displacement in rdx |
304 | 1505 __ addptr(r13, rdx); |
0 | 1506 // jsr returns atos that is not an oop |
1507 __ push_i(rax); | |
1508 __ dispatch_only(vtos); | |
1509 return; | |
1510 } | |
1511 | |
1512 // Normal (non-jsr) branch handling | |
1513 | |
1514 // Adjust the bcp in r13 by the displacement in rdx | |
304 | 1515 __ addptr(r13, rdx); |
0 | 1516 |
1517 assert(UseLoopCounter || !UseOnStackReplacement, | |
1518 "on-stack-replacement requires loop counters"); | |
1519 Label backedge_counter_overflow; | |
1520 Label profile_method; | |
1521 Label dispatch; | |
1522 if (UseLoopCounter) { | |
1523 // increment backedge counter for backward branches | |
1524 // rax: MDO | |
1525 // ebx: MDO bumped taken-count | |
1526 // rcx: method | |
1527 // rdx: target offset | |
1528 // r13: target bcp | |
1529 // r14: locals pointer | |
1530 __ testl(rdx, rdx); // check if forward or backward branch | |
1531 __ jcc(Assembler::positive, dispatch); // count only if backward branch | |
1532 | |
1533 // increment counter | |
1534 __ movl(rax, Address(rcx, be_offset)); // load backedge counter | |
1535 __ incrementl(rax, InvocationCounter::count_increment); // increment | |
1536 // counter | |
1537 __ movl(Address(rcx, be_offset), rax); // store counter | |
1538 | |
1539 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter | |
1540 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits | |
1541 __ addl(rax, Address(rcx, be_offset)); // add both counters | |
1542 | |
1543 if (ProfileInterpreter) { | |
1544 // Test to see if we should create a method data oop | |
1545 __ cmp32(rax, | |
1546 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit)); | |
1547 __ jcc(Assembler::less, dispatch); | |
1548 | |
1549 // if no method data exists, go to profile method | |
1550 __ test_method_data_pointer(rax, profile_method); | |
1551 | |
1552 if (UseOnStackReplacement) { | |
1553 // check for overflow against ebx which is the MDO taken count | |
1554 __ cmp32(rbx, | |
1555 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); | |
1556 __ jcc(Assembler::below, dispatch); | |
1557 | |
1558 // When ProfileInterpreter is on, the backedge_count comes | |
1559 // from the methodDataOop, whose value does not get reset on | |
1560 // the call to frequency_counter_overflow(). To avoid | |
1561 // excessive calls to the overflow routine while the method is | |
1562 // being compiled, add a second test to make sure the overflow | |
1563 // function is called only once every overflow_frequency. | |
1564 const int overflow_frequency = 1024; | |
1565 __ andl(rbx, overflow_frequency - 1); | |
1566 __ jcc(Assembler::zero, backedge_counter_overflow); | |
1567 | |
1568 } | |
1569 } else { | |
1570 if (UseOnStackReplacement) { | |
1571 // check for overflow against eax, which is the sum of the | |
1572 // counters | |
1573 __ cmp32(rax, | |
1574 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); | |
1575 __ jcc(Assembler::aboveEqual, backedge_counter_overflow); | |
1576 | |
1577 } | |
1578 } | |
1579 __ bind(dispatch); | |
1580 } | |
1581 | |
1582 // Pre-load the next target bytecode into rbx | |
1583 __ load_unsigned_byte(rbx, Address(r13, 0)); | |
1584 | |
1585 // continue with the bytecode @ target | |
1586 // eax: return bci for jsr's, unused otherwise | |
1587 // ebx: target bytecode | |
1588 // r13: target bcp | |
1589 __ dispatch_only(vtos); | |
1590 | |
1591 if (UseLoopCounter) { | |
1592 if (ProfileInterpreter) { | |
1593 // Out-of-line code to allocate method data oop. | |
1594 __ bind(profile_method); | |
1595 __ call_VM(noreg, | |
1596 CAST_FROM_FN_PTR(address, | |
1597 InterpreterRuntime::profile_method), r13); | |
1598 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode | |
304 | 1599 __ movptr(rcx, Address(rbp, method_offset)); |
1600 __ movptr(rcx, Address(rcx, | |
1601 in_bytes(methodOopDesc::method_data_offset()))); | |
1602 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), | |
1603 rcx); | |
0 | 1604 __ test_method_data_pointer(rcx, dispatch); |
1605 // offset non-null mdp by MDO::data_offset() + IR::profile_method() | |
304 | 1606 __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset())); |
1607 __ addptr(rcx, rax); | |
1608 __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), | |
1609 rcx); | |
0 | 1610 __ jmp(dispatch); |
1611 } | |
1612 | |
1613 if (UseOnStackReplacement) { | |
1614 // invocation counter overflow | |
1615 __ bind(backedge_counter_overflow); | |
304 | 1616 __ negptr(rdx); |
1617 __ addptr(rdx, r13); // branch bcp | |
0 | 1618 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp) |
1619 __ call_VM(noreg, | |
1620 CAST_FROM_FN_PTR(address, | |
1621 InterpreterRuntime::frequency_counter_overflow), | |
1622 rdx); | |
1623 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode | |
1624 | |
1625 // rax: osr nmethod (osr ok) or NULL (osr not possible) | |
1626 // ebx: target bytecode | |
1627 // rdx: scratch | |
1628 // r14: locals pointer | |
1629 // r13: bcp | |
304 | 1630 __ testptr(rax, rax); // test result |
0 | 1631 __ jcc(Assembler::zero, dispatch); // no osr if null |
1632 // nmethod may have been invalidated (VM may block upon call_VM return) | |
1633 __ movl(rcx, Address(rax, nmethod::entry_bci_offset())); | |
1634 __ cmpl(rcx, InvalidOSREntryBci); | |
1635 __ jcc(Assembler::equal, dispatch); | |
1636 | |
1637 // We have the address of an on stack replacement routine in eax | |
1638 // We need to prepare to execute the OSR method. First we must | |
1639 // migrate the locals and monitors off of the stack. | |
1640 | |
304 | 1641 __ mov(r13, rax); // save the nmethod |
0 | 1642 |
1643 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); | |
1644 | |
1645 // eax is OSR buffer, move it to expected parameter location | |
304 | 1646 __ mov(j_rarg0, rax); |
0 | 1647 |
1648 // We use the j_rarg definitions here so that registers don't conflict: the |
1649 // parameter registers vary across platforms, and we are in the midst of a |
1650 // calling sequence to the OSR nmethod, so we must avoid collisions. These are NOT parameters. |
1651 | |
1652 const Register retaddr = j_rarg2; | |
1653 const Register sender_sp = j_rarg1; | |
1654 | |
1655 // pop the interpreter frame | |
304 | 1656 __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp |
0 | 1657 __ leave(); // remove frame anchor |
304 | 1658 __ pop(retaddr); // get return address |
1659 __ mov(rsp, sender_sp); // set sp to sender sp | |
0 | 1660 // Ensure compiled code always sees stack at proper alignment |
304 | 1661 __ andptr(rsp, -(StackAlignmentInBytes)); |
0 | 1662 |
1663 // unlike 32-bit x86, we need no specialized return from compiled code |
1664 // to the interpreter or the call stub. | |
1665 | |
1666 // push the return address | |
304 | 1667 __ push(retaddr); |
0 | 1668 |
1669 // and begin the OSR nmethod | |
1670 __ jmp(Address(r13, nmethod::osr_entry_point_offset())); | |
1671 } | |
1672 } | |
1673 } | |
1674 | |
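The masking trick above works because overflow_frequency is a power of two: only every 1024th taken backward branch falls through to the overflow routine. A minimal, stand-alone C++ sketch of the same idea (names invented for illustration):

    #include <cstdint>
    #include <cstdio>

    // Illustrative only: call the (expensive) overflow handler at most once
    // every 'overflow_frequency' backward branches, using a power-of-two mask.
    static const uint32_t overflow_frequency = 1024;   // must be a power of two

    static void frequency_counter_overflow_stub() {
      std::puts("overflow handler called");
    }

    static void on_backward_branch(uint32_t taken_count) {
      // Same idea as 'andl(rbx, overflow_frequency - 1)' followed by the
      // jcc(zero, ...): only multiples of overflow_frequency trigger the call.
      if ((taken_count & (overflow_frequency - 1)) == 0) {
        frequency_counter_overflow_stub();
      }
    }

    int main() {
      for (uint32_t count = 1; count <= 4096; ++count) {
        on_backward_branch(count);                      // fires at 1024, 2048, ...
      }
      return 0;
    }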
1675 | |
1676 void TemplateTable::if_0cmp(Condition cc) { | |
1677 transition(itos, vtos); | |
1678 // assume branch is more often taken than not (loops use backward branches) | |
1679 Label not_taken; | |
1680 __ testl(rax, rax); | |
1681 __ jcc(j_not(cc), not_taken); | |
1682 branch(false, false); | |
1683 __ bind(not_taken); | |
1684 __ profile_not_taken_branch(rax); | |
1685 } | |
1686 | |
1687 void TemplateTable::if_icmp(Condition cc) { | |
1688 transition(itos, vtos); | |
1689 // assume branch is more often taken than not (loops use backward branches) | |
1690 Label not_taken; | |
1691 __ pop_i(rdx); | |
1692 __ cmpl(rdx, rax); | |
1693 __ jcc(j_not(cc), not_taken); | |
1694 branch(false, false); | |
1695 __ bind(not_taken); | |
1696 __ profile_not_taken_branch(rax); | |
1697 } | |
1698 | |
1699 void TemplateTable::if_nullcmp(Condition cc) { | |
1700 transition(atos, vtos); | |
1701 // assume branch is more often taken than not (loops use backward branches) | |
1702 Label not_taken; | |
304 | 1703 __ testptr(rax, rax); |
0 | 1704 __ jcc(j_not(cc), not_taken); |
1705 branch(false, false); | |
1706 __ bind(not_taken); | |
1707 __ profile_not_taken_branch(rax); | |
1708 } | |
1709 | |
1710 void TemplateTable::if_acmp(Condition cc) { | |
1711 transition(atos, vtos); | |
1712 // assume branch is more often taken than not (loops use backward branches) | |
1713 Label not_taken; | |
1714 __ pop_ptr(rdx); | |
304 | 1715 __ cmpptr(rdx, rax); |
0 | 1716 __ jcc(j_not(cc), not_taken); |
1717 branch(false, false); | |
1718 __ bind(not_taken); | |
1719 __ profile_not_taken_branch(rax); | |
1720 } | |
1721 | |
1722 void TemplateTable::ret() { | |
1723 transition(vtos, vtos); | |
1724 locals_index(rbx); | |
304 | 1725 __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp |
0 | 1726 __ profile_ret(rbx, rcx); |
1727 __ get_method(rax); | |
304 | 1728 __ movptr(r13, Address(rax, methodOopDesc::const_offset())); |
1729 __ lea(r13, Address(r13, rbx, Address::times_1, | |
1730 constMethodOopDesc::codes_offset())); | |
0 | 1731 __ dispatch_next(vtos); |
1732 } | |
1733 | |
1734 void TemplateTable::wide_ret() { | |
1735 transition(vtos, vtos); | |
1736 locals_index_wide(rbx); | |
304 | 1737 __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp |
0 | 1738 __ profile_ret(rbx, rcx); |
1739 __ get_method(rax); | |
304 | 1740 __ movptr(r13, Address(rax, methodOopDesc::const_offset())); |
1741 __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset())); | |
0 | 1742 __ dispatch_next(vtos); |
1743 } | |
1744 | |
1745 void TemplateTable::tableswitch() { | |
1746 Label default_case, continue_execution; | |
1747 transition(itos, vtos); | |
1748 // align r13 | |
304 | 1749 __ lea(rbx, at_bcp(BytesPerInt)); |
1750 __ andptr(rbx, -BytesPerInt); | |
0 | 1751 // load lo & hi |
1752 __ movl(rcx, Address(rbx, BytesPerInt)); | |
1753 __ movl(rdx, Address(rbx, 2 * BytesPerInt)); | |
1754 __ bswapl(rcx); | |
1755 __ bswapl(rdx); | |
1756 // check against lo & hi | |
1757 __ cmpl(rax, rcx); | |
1758 __ jcc(Assembler::less, default_case); | |
1759 __ cmpl(rax, rdx); | |
1760 __ jcc(Assembler::greater, default_case); | |
1761 // lookup dispatch offset | |
1762 __ subl(rax, rcx); | |
1763 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt)); | |
1764 __ profile_switch_case(rax, rbx, rcx); | |
1765 // continue execution | |
1766 __ bind(continue_execution); | |
1767 __ bswapl(rdx); | |
304 | 1768 __ movl2ptr(rdx, rdx); |
0 | 1769 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); |
304 | 1770 __ addptr(r13, rdx); |
0 | 1771 __ dispatch_only(vtos); |
1772 // handle default | |
1773 __ bind(default_case); | |
1774 __ profile_switch_default(rax); | |
1775 __ movl(rdx, Address(rbx, 0)); | |
1776 __ jmp(continue_execution); | |
1777 } | |
1778 | |
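For orientation, the operand block this template walks is the standard tableswitch layout: after the alignment padding come the big-endian default offset, the low and high bounds, and one jump offset per case. A small C++ sketch of that lookup, assuming the caller has already aligned the operand pointer (buffer contents are invented):

    #include <cstdint>
    #include <cstdio>

    // Read a 32-bit big-endian value, mirroring the bswapl after the loads.
    static int32_t read_be32(const uint8_t* p) {
      return (int32_t)((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
                       (uint32_t)p[2] << 8  | (uint32_t)p[3]);
    }

    // Illustrative tableswitch dispatch: default, low, high, then
    // (high - low + 1) jump offsets, all big-endian.
    static int32_t tableswitch_offset(const uint8_t* ops, int32_t key) {
      int32_t def  = read_be32(ops + 0);
      int32_t low  = read_be32(ops + 4);
      int32_t high = read_be32(ops + 8);
      if (key < low || key > high) return def;           // out of range -> default
      return read_be32(ops + 12 + 4 * (key - low));
    }

    int main() {
      uint8_t ops[20];
      auto put_be32 = [&](int idx, int32_t v) {
        ops[idx]     = (uint8_t)(v >> 24); ops[idx + 1] = (uint8_t)(v >> 16);
        ops[idx + 2] = (uint8_t)(v >> 8);  ops[idx + 3] = (uint8_t)v;
      };
      put_be32(0, 100);  put_be32(4, 0);  put_be32(8, 1);   // default, low, high
      put_be32(12, 40);  put_be32(16, 60);                  // case 0, case 1
      std::printf("%d %d %d\n", tableswitch_offset(ops, 0),
                  tableswitch_offset(ops, 1), tableswitch_offset(ops, 7));
      return 0;
    }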
1779 void TemplateTable::lookupswitch() { | |
1780 transition(itos, itos); | |
1781 __ stop("lookupswitch bytecode should have been rewritten"); | |
1782 } | |
1783 | |
1784 void TemplateTable::fast_linearswitch() { | |
1785 transition(itos, vtos); | |
1786 Label loop_entry, loop, found, continue_execution; | |
1787 // bswap rax so we can avoid bswapping the table entries | |
1788 __ bswapl(rax); | |
1789 // align r13 | |
304 | 1790 __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of |
1791 // this instruction (change offsets | |
1792 // below) | |
1793 __ andptr(rbx, -BytesPerInt); | |
0 | 1794 // set counter |
1795 __ movl(rcx, Address(rbx, BytesPerInt)); | |
1796 __ bswapl(rcx); | |
1797 __ jmpb(loop_entry); | |
1798 // table search | |
1799 __ bind(loop); | |
1800 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt)); | |
1801 __ jcc(Assembler::equal, found); | |
1802 __ bind(loop_entry); | |
1803 __ decrementl(rcx); | |
1804 __ jcc(Assembler::greaterEqual, loop); | |
1805 // default case | |
1806 __ profile_switch_default(rax); | |
1807 __ movl(rdx, Address(rbx, 0)); | |
1808 __ jmp(continue_execution); | |
1809 // entry found -> get offset | |
1810 __ bind(found); | |
1811 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt)); | |
1812 __ profile_switch_case(rcx, rax, rbx); | |
1813 // continue execution | |
1814 __ bind(continue_execution); | |
1815 __ bswapl(rdx); | |
304 | 1816 __ movl2ptr(rdx, rdx); |
0 | 1817 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); |
304 | 1818 __ addptr(r13, rdx); |
0 | 1819 __ dispatch_only(vtos); |
1820 } | |
1821 | |
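The single bswapl of the key is what lets the loop compare raw big-endian table entries without swapping each one inside the loop. A rough C++ equivalent of the trick, with invented data:

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the one-time 'bswapl(rax)' on the key.
    static uint32_t bswap32(uint32_t v) {
      return (v >> 24) | ((v >> 8) & 0x0000FF00u) |
             ((v << 8) & 0x00FF0000u) | (v << 24);
    }

    // Linear search over (match, offset) pairs stored big-endian: swapping the
    // key once avoids swapping every table entry in the loop body.
    static int32_t linearswitch_offset(const uint32_t* pairs_be, int npairs,
                                       uint32_t default_off_be, int32_t key) {
      uint32_t key_be = bswap32((uint32_t)key);
      for (int i = npairs - 1; i >= 0; --i) {       // same reverse order as above
        if (pairs_be[2 * i] == key_be) {
          return (int32_t)bswap32(pairs_be[2 * i + 1]);  // swap only the winner
        }
      }
      return (int32_t)bswap32(default_off_be);
    }

    int main() {
      // Pairs (5 -> 40) and (9 -> 60); default offset 100.
      uint32_t pairs[4] = { bswap32(5), bswap32(40), bswap32(9), bswap32(60) };
      std::printf("%d %d %d\n",
                  linearswitch_offset(pairs, 2, bswap32(100), 5),
                  linearswitch_offset(pairs, 2, bswap32(100), 9),
                  linearswitch_offset(pairs, 2, bswap32(100), 7));
      return 0;
    }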
1822 void TemplateTable::fast_binaryswitch() { | |
1823 transition(itos, vtos); | |
1824 // Implementation using the following core algorithm: | |
1825 // | |
1826 // int binary_search(int key, LookupswitchPair* array, int n) { | |
1827 // // Binary search according to "Methodik des Programmierens" by | |
1828 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. | |
1829 // int i = 0; | |
1830 // int j = n; | |
1831 // while (i+1 < j) { | |
1832 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) | |
1833 // // with Q: for all i: 0 <= i < n: key < a[i] | |
1834 // // where a stands for the array and assuming that the (nonexistent) |
1835 // // element a[n] is infinitely big. | |
1836 // int h = (i + j) >> 1; | |
1837 // // i < h < j | |
1838 // if (key < array[h].fast_match()) { | |
1839 // j = h; | |
1840 // } else { | |
1841 // i = h; | |
1842 // } | |
1843 // } | |
1844 // // R: a[i] <= key < a[i+1] or Q | |
1845 // // (i.e., if key is within array, i is the correct index) | |
1846 // return i; | |
1847 // } | |
1848 | |
1849 // Register allocation | |
1850 const Register key = rax; // already set (tosca) | |
1851 const Register array = rbx; | |
1852 const Register i = rcx; | |
1853 const Register j = rdx; | |
1854 const Register h = rdi; | |
1855 const Register temp = rsi; | |
1856 | |
1857 // Find array start | |
304 | 1858 __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to |
1859 // get rid of this | |
1860 // instruction (change | |
1861 // offsets below) | |
1862 __ andptr(array, -BytesPerInt); | |
0 | 1863 |
1864 // Initialize i & j | |
1865 __ xorl(i, i); // i = 0; | |
1866 __ movl(j, Address(array, -BytesPerInt)); // j = length(array); | |
1867 | |
1868 // Convert j into native byteordering | |
1869 __ bswapl(j); | |
1870 | |
1871 // And start | |
1872 Label entry; | |
1873 __ jmp(entry); | |
1874 | |
1875 // binary search loop | |
1876 { | |
1877 Label loop; | |
1878 __ bind(loop); | |
1879 // int h = (i + j) >> 1; | |
1880 __ leal(h, Address(i, j, Address::times_1)); // h = i + j; | |
1881 __ sarl(h, 1); // h = (i + j) >> 1; | |
1882 // if (key < array[h].fast_match()) { | |
1883 // j = h; | |
1884 // } else { | |
1885 // i = h; | |
1886 // } | |
1887 // Convert array[h].match to native byte-ordering before compare | |
1888 __ movl(temp, Address(array, h, Address::times_8)); | |
1889 __ bswapl(temp); | |
1890 __ cmpl(key, temp); | |
1891 // j = h if (key < array[h].fast_match()) | |
1892 __ cmovl(Assembler::less, j, h); | |
1893 // i = h if (key >= array[h].fast_match()) | |
1894 __ cmovl(Assembler::greaterEqual, i, h); | |
1895 // while (i+1 < j) | |
1896 __ bind(entry); | |
1897 __ leal(h, Address(i, 1)); // i+1 | |
1898 __ cmpl(h, j); // i+1 < j | |
1899 __ jcc(Assembler::less, loop); | |
1900 } | |
1901 | |
1902 // end of binary search, result index is i (must check again!) | |
1903 Label default_case; | |
1904 // Convert array[i].match to native byte-ordering before compare | |
1905 __ movl(temp, Address(array, i, Address::times_8)); | |
1906 __ bswapl(temp); | |
1907 __ cmpl(key, temp); | |
1908 __ jcc(Assembler::notEqual, default_case); | |
1909 | |
1910 // entry found -> j = offset | |
1911 __ movl(j , Address(array, i, Address::times_8, BytesPerInt)); | |
1912 __ profile_switch_case(i, key, array); | |
1913 __ bswapl(j); | |
304 | 1914 __ movl2ptr(j, j); |
0 | 1915 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1)); |
304 | 1916 __ addptr(r13, j); |
0 | 1917 __ dispatch_only(vtos); |
1918 | |
1919 // default case -> j = default offset | |
1920 __ bind(default_case); | |
1921 __ profile_switch_default(i); | |
1922 __ movl(j, Address(array, -2 * BytesPerInt)); | |
1923 __ bswapl(j); | |
304 | 1924 __ movl2ptr(j, j); |
0 | 1925 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1)); |
304 | 1926 __ addptr(r13, j); |
0 | 1927 __ dispatch_only(vtos); |
1928 } | |
1929 | |
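The two cmovl instructions make the interval update branch-free; that is the only real difference from the commented pseudocode. A compilable C++ rendering of the same data flow (whether a compiler emits cmov here is its own business):

    #include <cstdio>

    // j = h if key < match, i = h otherwise, exactly as the two cmovl's do.
    static int binary_search(int key, const int* matches, int n) {
      int i = 0;
      int j = n;
      while (i + 1 < j) {
        int h = (i + j) >> 1;
        bool lt = key < matches[h];
        j = lt ? h : j;        // cmovl(less, j, h)
        i = lt ? i : h;        // cmovl(greaterEqual, i, h)
      }
      return i;                // caller still re-checks matches[i] == key
    }

    int main() {
      int matches[] = { 2, 5, 9, 17 };
      std::printf("%d %d\n", binary_search(9, matches, 4),
                  binary_search(3, matches, 4));       // prints 2 and 0
      return 0;
    }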
1930 | |
1931 void TemplateTable::_return(TosState state) { | |
1932 transition(state, state); | |
1933 assert(_desc->calls_vm(), | |
1934 "inconsistent calls_vm information"); // call in remove_activation | |
1935 | |
1936 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { | |
1937 assert(state == vtos, "only valid state"); | |
304 | 1938 __ movptr(c_rarg1, aaddress(0)); |
113 | 1939 __ load_klass(rdi, c_rarg1); |
0 | 1940 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); |
1941 __ testl(rdi, JVM_ACC_HAS_FINALIZER); | |
1942 Label skip_register_finalizer; | |
1943 __ jcc(Assembler::zero, skip_register_finalizer); | |
1944 | |
1945 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1); | |
1946 | |
1947 __ bind(skip_register_finalizer); | |
1948 } | |
1949 | |
1950 __ remove_activation(state, r13); | |
1951 __ jmp(r13); | |
1952 } | |
1953 | |
1954 // ---------------------------------------------------------------------------- | |
1955 // Volatile variables demand their effects be made known to all CPUs |
1956 // in order. Store buffers on most chips allow reads & writes to | |
1957 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode | |
1958 // without some kind of memory barrier (i.e., it's not sufficient that | |
1959 // the interpreter does not reorder volatile references, the hardware | |
1960 // also must not reorder them). | |
1961 // | |
1962 // According to the new Java Memory Model (JMM): | |
1963 // (1) All volatiles are serialized wrt to each other. ALSO reads & | |
1964 // writes act as acquire & release, so: |
1965 // (2) A read cannot let unrelated NON-volatile memory refs that | |
1966 // happen after the read float up to before the read. It's OK for | |
1967 // non-volatile memory refs that happen before the volatile read to | |
1968 // float down below it. | |
1969 // (3) Similarly, a volatile write cannot let unrelated NON-volatile |
1970 // memory refs that happen BEFORE the write float down to after the | |
1971 // write. It's OK for non-volatile memory refs that happen after the | |
1972 // volatile write to float up before it. | |
1973 // | |
1974 // We only put in barriers around volatile refs (they are expensive), | |
1975 // not _between_ memory refs (that would require us to track the | |
1976 // flavor of the previous memory refs). Requirements (2) and (3) | |
1977 // require some barriers before volatile stores and after volatile | |
1978 // loads. These nearly cover requirement (1) but miss the | |
1979 // volatile-store-volatile-load case. This final case is placed after | |
1980 // volatile-stores although it could just as well go before | |
1981 // volatile-loads. | |
1982 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits | |
1983 order_constraint) { | |
1984 // Helper function to insert an is-volatile test and memory barrier |
1985 if (os::is_MP()) { // Not needed on single CPU | |
1986 __ membar(order_constraint); | |
1987 } | |
1988 } | |
1989 | |
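Loosely translated into C++11 atomics, the rules above amount to acquire on volatile loads, release on volatile stores, plus one full fence to cover the volatile-store / volatile-load case. A sketch of that mapping, not the VM's actual barrier selection:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    static int payload = 0;                        // ordinary, non-volatile data
    static std::atomic<int> flag(0);               // stands in for a Java volatile

    static void writer() {
      payload = 42;                                // must not sink below the store
      flag.store(1, std::memory_order_release);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // the StoreLoad case
    }

    static void reader() {
      if (flag.load(std::memory_order_acquire) == 1) {  // later reads must not
        std::printf("payload = %d\n", payload);          // float above the load
      }
    }

    int main() {
      std::thread t1(writer), t2(reader);
      t1.join(); t2.join();
      return 0;
    }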
1990 void TemplateTable::resolve_cache_and_index(int byte_no, | |
1991 Register Rcache, | |
1992 Register index) { | |
1993 assert(byte_no == 1 || byte_no == 2, "byte_no out of range"); | |
1994 | |
1995 const Register temp = rbx; | |
1996 assert_different_registers(Rcache, index, temp); | |
1997 | |
1998 const int shift_count = (1 + byte_no) * BitsPerByte; | |
1999 Label resolved; | |
2000 __ get_cache_and_index_at_bcp(Rcache, index, 1); | |
2001 __ movl(temp, Address(Rcache, | |
2002 index, Address::times_8, | |
2003 constantPoolCacheOopDesc::base_offset() + | |
2004 ConstantPoolCacheEntry::indices_offset())); | |
2005 __ shrl(temp, shift_count); | |
2006 // have we resolved this bytecode? | |
2007 __ andl(temp, 0xFF); | |
2008 __ cmpl(temp, (int) bytecode()); | |
2009 __ jcc(Assembler::equal, resolved); | |
2010 | |
2011 // resolve first time through | |
2012 address entry; | |
2013 switch (bytecode()) { | |
2014 case Bytecodes::_getstatic: | |
2015 case Bytecodes::_putstatic: | |
2016 case Bytecodes::_getfield: | |
2017 case Bytecodes::_putfield: | |
2018 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); | |
2019 break; | |
2020 case Bytecodes::_invokevirtual: | |
2021 case Bytecodes::_invokespecial: | |
2022 case Bytecodes::_invokestatic: | |
2023 case Bytecodes::_invokeinterface: | |
2024 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); | |
2025 break; | |
2026 default: | |
2027 ShouldNotReachHere(); | |
2028 break; | |
2029 } | |
2030 __ movl(temp, (int) bytecode()); | |
2031 __ call_VM(noreg, entry, temp); | |
2032 | |
2033 // Update registers with resolved info | |
2034 __ get_cache_and_index_at_bcp(Rcache, index, 1); | |
2035 __ bind(resolved); | |
2036 } | |
2037 | |
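The shape of resolve_cache_and_index is: test whether this bytecode has already resolved the entry, and only if not, call into the runtime once and re-read the entry. A simplified C++ model with invented field names and values:

    #include <cstdint>
    #include <cstdio>

    // Toy constant-pool-cache entry: the bytecode that resolved the entry is
    // remembered in an upper byte of 'indices', selected by byte_no.
    struct CpCacheEntry {
      uint32_t indices;   // packed: resolved bytecodes plus the cp index
      uint32_t f2;        // e.g. the field offset, filled in on resolution
    };

    static void resolve_in_runtime(CpCacheEntry* e, int byte_no, uint8_t bc) {
      e->f2 = 16;                                        // pretend field offset
      e->indices |= (uint32_t)bc << ((1 + byte_no) * 8);
    }

    static uint32_t resolve_cache_and_index(CpCacheEntry* e, int byte_no,
                                            uint8_t bc) {
      int shift = (1 + byte_no) * 8;                     // same shift_count formula
      if (((e->indices >> shift) & 0xFF) != bc) {        // not resolved yet?
        resolve_in_runtime(e, byte_no, bc);              // slow path, taken once
      }
      return e->f2;
    }

    int main() {
      CpCacheEntry e = { 7 /* cp index */, 0 };
      std::printf("offset = %u\n", resolve_cache_and_index(&e, 1, 0xB4));  // slow
      std::printf("offset = %u\n", resolve_cache_and_index(&e, 1, 0xB4));  // fast
      return 0;
    }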
2038 // The Rcache and index registers must be set before the call |
2039 void TemplateTable::load_field_cp_cache_entry(Register obj, | |
2040 Register cache, | |
2041 Register index, | |
2042 Register off, | |
2043 Register flags, | |
2044 bool is_static = false) { | |
2045 assert_different_registers(cache, index, flags, off); | |
2046 | |
2047 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); | |
2048 // Field offset | |
304 | 2049 __ movptr(off, Address(cache, index, Address::times_8, |
2050 in_bytes(cp_base_offset + | |
2051 ConstantPoolCacheEntry::f2_offset()))); | |
0 | 2052 // Flags |
2053 __ movl(flags, Address(cache, index, Address::times_8, | |
2054 in_bytes(cp_base_offset + | |
2055 ConstantPoolCacheEntry::flags_offset()))); | |
2056 | |
2057 // klass overwrite register | |
2058 if (is_static) { | |
304 | 2059 __ movptr(obj, Address(cache, index, Address::times_8, |
2060 in_bytes(cp_base_offset + | |
2061 ConstantPoolCacheEntry::f1_offset()))); | |
0 | 2062 } |
2063 } | |
2064 | |
2065 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, | |
2066 Register method, | |
2067 Register itable_index, | |
2068 Register flags, | |
2069 bool is_invokevirtual, | |
2070 bool is_invokevfinal /*unused*/) { | |
2071 // setup registers | |
2072 const Register cache = rcx; | |
2073 const Register index = rdx; | |
2074 assert_different_registers(method, flags); | |
2075 assert_different_registers(method, cache, index); | |
2076 assert_different_registers(itable_index, flags); | |
2077 assert_different_registers(itable_index, cache, index); | |
2078 // determine constant pool cache field offsets | |
2079 const int method_offset = in_bytes( | |
2080 constantPoolCacheOopDesc::base_offset() + | |
2081 (is_invokevirtual | |
2082 ? ConstantPoolCacheEntry::f2_offset() | |
2083 : ConstantPoolCacheEntry::f1_offset())); | |
2084 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2085 ConstantPoolCacheEntry::flags_offset()); | |
2086 // access constant pool cache fields | |
2087 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2088 ConstantPoolCacheEntry::f2_offset()); | |
2089 | |
2090 resolve_cache_and_index(byte_no, cache, index); | |
2091 | |
2092 assert(wordSize == 8, "adjust code below"); | |
304 | 2093 __ movptr(method, Address(cache, index, Address::times_8, method_offset)); |
0 | 2094 if (itable_index != noreg) { |
304 | 2095 __ movptr(itable_index, |
0 | 2096 Address(cache, index, Address::times_8, index_offset)); |
2097 } | |
2098 __ movl(flags , Address(cache, index, Address::times_8, flags_offset)); | |
2099 } | |
2100 | |
2101 | |
2102 // The cache and index registers are expected to be set before the call. |
2103 // Correct values of the cache and index registers are preserved. | |
2104 void TemplateTable::jvmti_post_field_access(Register cache, Register index, | |
2105 bool is_static, bool has_tos) { | |
2106 // do the JVMTI work here to avoid disturbing the register state below | |
2107 // We use c_rarg registers here because we want to use the register used in | |
2108 // the call to the VM | |
2109 if (JvmtiExport::can_post_field_access()) { | |
2110 // Check to see if a field access watch has been set before we | |
2111 // take the time to call into the VM. | |
2112 Label L1; | |
2113 assert_different_registers(cache, index, rax); | |
2114 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); | |
2115 __ testl(rax, rax); | |
2116 __ jcc(Assembler::zero, L1); | |
2117 | |
2118 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1); | |
2119 | |
2120 // cache entry pointer | |
304 | 2121 __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset())); |
0 | 2122 __ shll(c_rarg3, LogBytesPerWord); |
304 | 2123 __ addptr(c_rarg2, c_rarg3); |
0 | 2124 if (is_static) { |
2125 __ xorl(c_rarg1, c_rarg1); // NULL object reference | |
2126 } else { | |
304 | 2127 __ movptr(c_rarg1, at_tos()); // get object pointer without popping it |
0 | 2128 __ verify_oop(c_rarg1); |
2129 } | |
2130 // c_rarg1: object pointer or NULL | |
2131 // c_rarg2: cache entry pointer | |
2132 // c_rarg3: jvalue object on the stack | |
2133 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
2134 InterpreterRuntime::post_field_access), | |
2135 c_rarg1, c_rarg2, c_rarg3); | |
2136 __ get_cache_and_index_at_bcp(cache, index, 1); | |
2137 __ bind(L1); | |
2138 } | |
2139 } | |
2140 | |
2141 void TemplateTable::pop_and_check_object(Register r) { | |
2142 __ pop_ptr(r); | |
2143 __ null_check(r); // for field access must check obj. | |
2144 __ verify_oop(r); | |
2145 } | |
2146 | |
2147 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { | |
2148 transition(vtos, vtos); | |
2149 | |
2150 const Register cache = rcx; | |
2151 const Register index = rdx; | |
2152 const Register obj = c_rarg3; | |
2153 const Register off = rbx; | |
2154 const Register flags = rax; | |
2155 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them | |
2156 | |
2157 resolve_cache_and_index(byte_no, cache, index); | |
2158 jvmti_post_field_access(cache, index, is_static, false); | |
2159 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); | |
2160 | |
2161 if (!is_static) { | |
2162 // obj is on the stack | |
2163 pop_and_check_object(obj); | |
2164 } | |
2165 | |
2166 const Address field(obj, off, Address::times_1); | |
2167 | |
2168 Label Done, notByte, notInt, notShort, notChar, | |
2169 notLong, notFloat, notObj, notDouble; | |
2170 | |
2171 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2172 assert(btos == 0, "change code, btos != 0"); | |
2173 | |
2174 __ andl(flags, 0x0F); | |
2175 __ jcc(Assembler::notZero, notByte); | |
2176 // btos | |
2177 __ load_signed_byte(rax, field); | |
2178 __ push(btos); | |
2179 // Rewrite bytecode to be faster | |
2180 if (!is_static) { | |
2181 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx); | |
2182 } | |
2183 __ jmp(Done); | |
2184 | |
2185 __ bind(notByte); | |
2186 __ cmpl(flags, atos); | |
2187 __ jcc(Assembler::notEqual, notObj); | |
2188 // atos | |
113 | 2189 __ load_heap_oop(rax, field); |
0 | 2190 __ push(atos); |
2191 if (!is_static) { | |
2192 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); | |
2193 } | |
2194 __ jmp(Done); | |
2195 | |
2196 __ bind(notObj); | |
2197 __ cmpl(flags, itos); | |
2198 __ jcc(Assembler::notEqual, notInt); | |
2199 // itos | |
2200 __ movl(rax, field); | |
2201 __ push(itos); | |
2202 // Rewrite bytecode to be faster | |
2203 if (!is_static) { | |
2204 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx); | |
2205 } | |
2206 __ jmp(Done); | |
2207 | |
2208 __ bind(notInt); | |
2209 __ cmpl(flags, ctos); | |
2210 __ jcc(Assembler::notEqual, notChar); | |
2211 // ctos | |
2212 __ load_unsigned_word(rax, field); | |
2213 __ push(ctos); | |
2214 // Rewrite bytecode to be faster | |
2215 if (!is_static) { | |
2216 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx); | |
2217 } | |
2218 __ jmp(Done); | |
2219 | |
2220 __ bind(notChar); | |
2221 __ cmpl(flags, stos); | |
2222 __ jcc(Assembler::notEqual, notShort); | |
2223 // stos | |
2224 __ load_signed_word(rax, field); | |
2225 __ push(stos); | |
2226 // Rewrite bytecode to be faster | |
2227 if (!is_static) { | |
2228 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx); | |
2229 } | |
2230 __ jmp(Done); | |
2231 | |
2232 __ bind(notShort); | |
2233 __ cmpl(flags, ltos); | |
2234 __ jcc(Assembler::notEqual, notLong); | |
2235 // ltos | |
2236 __ movq(rax, field); | |
2237 __ push(ltos); | |
2238 // Rewrite bytecode to be faster | |
2239 if (!is_static) { | |
2240 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx); | |
2241 } | |
2242 __ jmp(Done); | |
2243 | |
2244 __ bind(notLong); | |
2245 __ cmpl(flags, ftos); | |
2246 __ jcc(Assembler::notEqual, notFloat); | |
2247 // ftos | |
2248 __ movflt(xmm0, field); | |
2249 __ push(ftos); | |
2250 // Rewrite bytecode to be faster | |
2251 if (!is_static) { | |
2252 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx); | |
2253 } | |
2254 __ jmp(Done); | |
2255 | |
2256 __ bind(notFloat); | |
2257 #ifdef ASSERT | |
2258 __ cmpl(flags, dtos); | |
2259 __ jcc(Assembler::notEqual, notDouble); | |
2260 #endif | |
2261 // dtos | |
2262 __ movdbl(xmm0, field); | |
2263 __ push(dtos); | |
2264 // Rewrite bytecode to be faster | |
2265 if (!is_static) { | |
2266 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx); | |
2267 } | |
2268 #ifdef ASSERT | |
2269 __ jmp(Done); | |
2270 | |
2271 __ bind(notDouble); | |
2272 __ stop("Bad state"); | |
2273 #endif | |
2274 | |
2275 __ bind(Done); | |
2276 // [jk] not needed currently | |
2277 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad | | |
2278 // Assembler::LoadStore)); | |
2279 } | |
2280 | |
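The pattern repeated for every type above is: recover the tos state by shifting the cached flags right by tosBits, push the value in that state, and quicken the bytecode so the next execution goes straight to a fast_ variant. A toy C++ sketch; the bit position and the fast opcodes are placeholders (0xB4 is the real getfield opcode, the rest are invented):

    #include <cstdint>
    #include <cstdio>

    // Only btos == 0 matters structurally above; the other values here are
    // illustrative placeholders, not the VM's TosState numbering.
    enum Tos { btos = 0, itos = 3, atos = 7 };
    static const int tosBits = 28;            // assumed position of the type bits

    static uint8_t quicken_getfield(uint32_t flags) {
      switch ((flags >> tosBits) & 0x0F) {    // same shift + 0x0F mask as above
        case btos: return 0xCB;               // stand-in for fast_bgetfield
        case itos: return 0xCC;               // stand-in for fast_igetfield
        case atos: return 0xCD;               // stand-in for fast_agetfield
        default:   return 0xB4;               // leave as plain getfield
      }
    }

    int main() {
      uint32_t flags = (uint32_t)itos << tosBits;   // as filled in at resolution
      std::printf("rewritten opcode = 0x%X\n", quicken_getfield(flags));
      return 0;
    }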
2281 | |
2282 void TemplateTable::getfield(int byte_no) { | |
2283 getfield_or_static(byte_no, false); | |
2284 } | |
2285 | |
2286 void TemplateTable::getstatic(int byte_no) { | |
2287 getfield_or_static(byte_no, true); | |
2288 } | |
2289 | |
2290 // The cache and index registers are expected to be set before the call. |
2291 // The function may destroy various registers, just not the cache and index registers. | |
2292 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { | |
2293 transition(vtos, vtos); | |
2294 | |
2295 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); | |
2296 | |
2297 if (JvmtiExport::can_post_field_modification()) { | |
2298 // Check to see if a field modification watch has been set before | |
2299 // we take the time to call into the VM. | |
2300 Label L1; | |
2301 assert_different_registers(cache, index, rax); | |
2302 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); | |
2303 __ testl(rax, rax); | |
2304 __ jcc(Assembler::zero, L1); | |
2305 | |
2306 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1); | |
2307 | |
2308 if (is_static) { | |
2309 // Life is simple. Null out the object pointer. | |
2310 __ xorl(c_rarg1, c_rarg1); | |
2311 } else { | |
2312 // Life is harder. The stack holds the value on top, followed by | |
2313 // the object. We don't know the size of the value, though; it | |
2314 // could be one or two words depending on its type. As a result, | |
2315 // we must find the type to determine where the object is. | |
2316 __ movl(c_rarg3, Address(c_rarg2, rscratch1, | |
2317 Address::times_8, | |
2318 in_bytes(cp_base_offset + | |
2319 ConstantPoolCacheEntry::flags_offset()))); | |
2320 __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits); | |
2321 // Make sure we don't need to mask rcx for tosBits after the | |
2322 // above shift | |
2323 ConstantPoolCacheEntry::verify_tosBits(); | |
304 | 2324 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue |
0 | 2325 __ cmpl(c_rarg3, ltos); |
304 | 2326 __ cmovptr(Assembler::equal, |
2327 c_rarg1, at_tos_p2()); // ltos (two word jvalue) | |
0 | 2328 __ cmpl(c_rarg3, dtos); |
304 | 2329 __ cmovptr(Assembler::equal, |
2330 c_rarg1, at_tos_p2()); // dtos (two word jvalue) | |
0 | 2331 } |
2332 // cache entry pointer | |
304 | 2333 __ addptr(c_rarg2, in_bytes(cp_base_offset)); |
0 | 2334 __ shll(rscratch1, LogBytesPerWord); |
304 | 2335 __ addptr(c_rarg2, rscratch1); |
0 | 2336 // object (tos) |
304 | 2337 __ mov(c_rarg3, rsp); |
0 | 2338 // c_rarg1: object pointer set up above (NULL if static) |
2339 // c_rarg2: cache entry pointer | |
2340 // c_rarg3: jvalue object on the stack | |
2341 __ call_VM(noreg, | |
2342 CAST_FROM_FN_PTR(address, | |
2343 InterpreterRuntime::post_field_modification), | |
2344 c_rarg1, c_rarg2, c_rarg3); | |
2345 __ get_cache_and_index_at_bcp(cache, index, 1); | |
2346 __ bind(L1); | |
2347 } | |
2348 } | |
2349 | |
2350 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { | |
2351 transition(vtos, vtos); | |
2352 | |
2353 const Register cache = rcx; | |
2354 const Register index = rdx; | |
2355 const Register obj = rcx; | |
2356 const Register off = rbx; | |
2357 const Register flags = rax; | |
2358 const Register bc = c_rarg3; | |
2359 | |
2360 resolve_cache_and_index(byte_no, cache, index); | |
2361 jvmti_post_field_mod(cache, index, is_static); | |
2362 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); | |
2363 | |
2364 // [jk] not needed currently | |
2365 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | | |
2366 // Assembler::StoreStore)); | |
2367 | |
2368 Label notVolatile, Done; | |
2369 __ movl(rdx, flags); | |
2370 __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2371 __ andl(rdx, 0x1); | |
2372 | |
2373 // field address | |
2374 const Address field(obj, off, Address::times_1); | |
2375 | |
2376 Label notByte, notInt, notShort, notChar, | |
2377 notLong, notFloat, notObj, notDouble; | |
2378 | |
2379 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2380 | |
2381 assert(btos == 0, "change code, btos != 0"); | |
2382 __ andl(flags, 0x0f); | |
2383 __ jcc(Assembler::notZero, notByte); | |
2384 // btos | |
2385 __ pop(btos); | |
2386 if (!is_static) pop_and_check_object(obj); | |
2387 __ movb(field, rax); | |
2388 if (!is_static) { | |
2389 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx); | |
2390 } | |
2391 __ jmp(Done); | |
2392 | |
2393 __ bind(notByte); | |
2394 __ cmpl(flags, atos); | |
2395 __ jcc(Assembler::notEqual, notObj); | |
2396 // atos | |
2397 __ pop(atos); | |
2398 if (!is_static) pop_and_check_object(obj); | |
113 | 2399 __ store_heap_oop(field, rax); |
0 | 2400 __ store_check(obj, field); // Need to mark card |
2401 if (!is_static) { | |
2402 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx); | |
2403 } | |
2404 __ jmp(Done); | |
2405 | |
2406 __ bind(notObj); | |
2407 __ cmpl(flags, itos); | |
2408 __ jcc(Assembler::notEqual, notInt); | |
2409 // itos | |
2410 __ pop(itos); | |
2411 if (!is_static) pop_and_check_object(obj); | |
2412 __ movl(field, rax); | |
2413 if (!is_static) { | |
2414 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx); | |
2415 } | |
2416 __ jmp(Done); | |
2417 | |
2418 __ bind(notInt); | |
2419 __ cmpl(flags, ctos); | |
2420 __ jcc(Assembler::notEqual, notChar); | |
2421 // ctos | |
2422 __ pop(ctos); | |
2423 if (!is_static) pop_and_check_object(obj); | |
2424 __ movw(field, rax); | |
2425 if (!is_static) { | |
2426 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx); | |
2427 } | |
2428 __ jmp(Done); | |
2429 | |
2430 __ bind(notChar); | |
2431 __ cmpl(flags, stos); | |
2432 __ jcc(Assembler::notEqual, notShort); | |
2433 // stos | |
2434 __ pop(stos); | |
2435 if (!is_static) pop_and_check_object(obj); | |
2436 __ movw(field, rax); | |
2437 if (!is_static) { | |
2438 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx); | |
2439 } | |
2440 __ jmp(Done); | |
2441 | |
2442 __ bind(notShort); | |
2443 __ cmpl(flags, ltos); | |
2444 __ jcc(Assembler::notEqual, notLong); | |
2445 // ltos | |
2446 __ pop(ltos); | |
2447 if (!is_static) pop_and_check_object(obj); | |
2448 __ movq(field, rax); | |
2449 if (!is_static) { | |
2450 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx); | |
2451 } | |
2452 __ jmp(Done); | |
2453 | |
2454 __ bind(notLong); | |
2455 __ cmpl(flags, ftos); | |
2456 __ jcc(Assembler::notEqual, notFloat); | |
2457 // ftos | |
2458 __ pop(ftos); | |
2459 if (!is_static) pop_and_check_object(obj); | |
2460 __ movflt(field, xmm0); | |
2461 if (!is_static) { | |
2462 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx); | |
2463 } | |
2464 __ jmp(Done); | |
2465 | |
2466 __ bind(notFloat); | |
2467 #ifdef ASSERT | |
2468 __ cmpl(flags, dtos); | |
2469 __ jcc(Assembler::notEqual, notDouble); | |
2470 #endif | |
2471 // dtos | |
2472 __ pop(dtos); | |
2473 if (!is_static) pop_and_check_object(obj); | |
2474 __ movdbl(field, xmm0); | |
2475 if (!is_static) { | |
2476 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx); | |
2477 } | |
2478 | |
2479 #ifdef ASSERT | |
2480 __ jmp(Done); | |
2481 | |
2482 __ bind(notDouble); | |
2483 __ stop("Bad state"); | |
2484 #endif | |
2485 | |
2486 __ bind(Done); | |
2487 // Check for volatile store | |
2488 __ testl(rdx, rdx); | |
2489 __ jcc(Assembler::zero, notVolatile); | |
2490 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | | |
2491 Assembler::StoreStore)); | |
2492 | |
2493 __ bind(notVolatile); | |
2494 } | |
2495 | |
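The volatile handling above boils down to one bit extracted from the cache flags and a conditional fence after the store. A small C++ sketch; the bit position is an assumption, not the VM's actual flags layout:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static const int volatileField = 21;   // assumed bit position, illustration only

    static void put_int_field(int* field_addr, int value, uint32_t flags) {
      *field_addr = value;                                   // the plain store
      if ((flags >> volatileField) & 0x1) {                  // volatile field?
        std::atomic_thread_fence(std::memory_order_seq_cst); // StoreLoad|StoreStore
      }
    }

    int main() {
      int f = 0;
      put_int_field(&f, 7, 1u << volatileField);
      std::printf("f = %d\n", f);
      return 0;
    }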
2496 void TemplateTable::putfield(int byte_no) { | |
2497 putfield_or_static(byte_no, false); | |
2498 } | |
2499 | |
2500 void TemplateTable::putstatic(int byte_no) { | |
2501 putfield_or_static(byte_no, true); | |
2502 } | |
2503 | |
2504 void TemplateTable::jvmti_post_fast_field_mod() { | |
2505 if (JvmtiExport::can_post_field_modification()) { | |
2506 // Check to see if a field modification watch has been set before | |
2507 // we take the time to call into the VM. | |
2508 Label L2; | |
2509 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); | |
2510 __ testl(c_rarg3, c_rarg3); | |
2511 __ jcc(Assembler::zero, L2); | |
2512 __ pop_ptr(rbx); // copy the object pointer from tos | |
2513 __ verify_oop(rbx); | |
2514 __ push_ptr(rbx); // put the object pointer back on tos | |
304 | 2515 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object |
2516 __ mov(c_rarg3, rsp); | |
0 | 2517 const Address field(c_rarg3, 0); |
2518 | |
2519 switch (bytecode()) { // load values into the jvalue object | |
113 | 2520 case Bytecodes::_fast_aputfield: __ movq(field, rax); break; |
0 | 2521 case Bytecodes::_fast_lputfield: __ movq(field, rax); break; |
2522 case Bytecodes::_fast_iputfield: __ movl(field, rax); break; | |
2523 case Bytecodes::_fast_bputfield: __ movb(field, rax); break; | |
2524 case Bytecodes::_fast_sputfield: // fall through | |
2525 case Bytecodes::_fast_cputfield: __ movw(field, rax); break; | |
2526 case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break; | |
2527 case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break; | |
2528 default: | |
2529 ShouldNotReachHere(); | |
2530 } | |
2531 | |
2532 // Save rax because call_VM() will clobber it, then use it for | |
2533 // JVMTI purposes | |
304 | 2534 __ push(rax); |
0 | 2535 // access constant pool cache entry |
2536 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1); | |
2537 __ verify_oop(rbx); | |
2538 // rbx: object pointer copied above | |
2539 // c_rarg2: cache entry pointer | |
2540 // c_rarg3: jvalue object on the stack | |
2541 __ call_VM(noreg, | |
2542 CAST_FROM_FN_PTR(address, | |
2543 InterpreterRuntime::post_field_modification), | |
2544 rbx, c_rarg2, c_rarg3); | |
304 | 2545 __ pop(rax); // restore lower value |
2546 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space | |
0 | 2547 __ bind(L2); |
2548 } | |
2549 } | |
2550 | |
2551 void TemplateTable::fast_storefield(TosState state) { | |
2552 transition(state, vtos); | |
2553 | |
2554 ByteSize base = constantPoolCacheOopDesc::base_offset(); | |
2555 | |
2556 jvmti_post_fast_field_mod(); | |
2557 | |
2558 // access constant pool cache | |
2559 __ get_cache_and_index_at_bcp(rcx, rbx, 1); | |
2560 | |
2561 // test for volatile with rdx | |
2562 __ movl(rdx, Address(rcx, rbx, Address::times_8, | |
2563 in_bytes(base + | |
2564 ConstantPoolCacheEntry::flags_offset()))); | |
2565 | |
2566 // replace index with field offset from cache entry | |
304 | 2567 __ movptr(rbx, Address(rcx, rbx, Address::times_8, |
2568 in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); | |
0 | 2569 |
2570 // [jk] not needed currently | |
2571 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | | |
2572 // Assembler::StoreStore)); | |
2573 | |
2574 Label notVolatile; | |
2575 __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2576 __ andl(rdx, 0x1); | |
2577 | |
2578 // Get object from stack | |
2579 pop_and_check_object(rcx); | |
2580 | |
2581 // field address | |
2582 const Address field(rcx, rbx, Address::times_1); | |
2583 | |
2584 // access field | |
2585 switch (bytecode()) { | |
2586 case Bytecodes::_fast_aputfield: | |
113 | 2587 __ store_heap_oop(field, rax); |
0 | 2588 __ store_check(rcx, field); |
2589 break; | |
2590 case Bytecodes::_fast_lputfield: | |
2591 __ movq(field, rax); | |
2592 break; | |
2593 case Bytecodes::_fast_iputfield: | |
2594 __ movl(field, rax); | |
2595 break; | |
2596 case Bytecodes::_fast_bputfield: | |
2597 __ movb(field, rax); | |
2598 break; | |
2599 case Bytecodes::_fast_sputfield: | |
2600 // fall through | |
2601 case Bytecodes::_fast_cputfield: | |
2602 __ movw(field, rax); | |
2603 break; | |
2604 case Bytecodes::_fast_fputfield: | |
2605 __ movflt(field, xmm0); | |
2606 break; | |
2607 case Bytecodes::_fast_dputfield: | |
2608 __ movdbl(field, xmm0); | |
2609 break; | |
2610 default: | |
2611 ShouldNotReachHere(); | |
2612 } | |
2613 | |
2614 // Check for volatile store | |
2615 __ testl(rdx, rdx); | |
2616 __ jcc(Assembler::zero, notVolatile); | |
2617 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | | |
2618 Assembler::StoreStore)); | |
2619 __ bind(notVolatile); | |
2620 } | |
2621 | |
2622 | |
2623 void TemplateTable::fast_accessfield(TosState state) { | |
2624 transition(atos, state); | |
2625 | |
2626 // Do the JVMTI work here to avoid disturbing the register state below | |
2627 if (JvmtiExport::can_post_field_access()) { | |
2628 // Check to see if a field access watch has been set before we | |
2629 // take the time to call into the VM. | |
2630 Label L1; | |
2631 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); | |
2632 __ testl(rcx, rcx); | |
2633 __ jcc(Assembler::zero, L1); | |
2634 // access constant pool cache entry | |
2635 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1); | |
113 | 2636 __ verify_oop(rax); |
304 | 2637 __ mov(r12, rax); // save object pointer before call_VM() clobbers it |
2638 __ mov(c_rarg1, rax); | |
0 | 2639 // c_rarg1: object pointer copied above |
2640 // c_rarg2: cache entry pointer | |
2641 __ call_VM(noreg, | |
2642 CAST_FROM_FN_PTR(address, | |
2643 InterpreterRuntime::post_field_access), | |
2644 c_rarg1, c_rarg2); | |
304 | 2645 __ mov(rax, r12); // restore object pointer |
113 | 2646 __ reinit_heapbase(); |
0 | 2647 __ bind(L1); |
2648 } | |
2649 | |
2650 // access constant pool cache | |
2651 __ get_cache_and_index_at_bcp(rcx, rbx, 1); | |
2652 // replace index with field offset from cache entry | |
2653 // [jk] not needed currently | |
2654 // if (os::is_MP()) { | |
2655 // __ movl(rdx, Address(rcx, rbx, Address::times_8, | |
2656 // in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2657 // ConstantPoolCacheEntry::flags_offset()))); | |
2658 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2659 // __ andl(rdx, 0x1); | |
2660 // } | |
304 | 2661 __ movptr(rbx, Address(rcx, rbx, Address::times_8, |
2662 in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2663 ConstantPoolCacheEntry::f2_offset()))); | |
0 | 2664 |
2665 // rax: object | |
2666 __ verify_oop(rax); | |
2667 __ null_check(rax); | |
2668 Address field(rax, rbx, Address::times_1); | |
2669 | |
2670 // access field | |
2671 switch (bytecode()) { | |
2672 case Bytecodes::_fast_agetfield: | |
113 | 2673 __ load_heap_oop(rax, field); |
0 | 2674 __ verify_oop(rax); |
2675 break; | |
2676 case Bytecodes::_fast_lgetfield: | |
2677 __ movq(rax, field); | |
2678 break; | |
2679 case Bytecodes::_fast_igetfield: | |
2680 __ movl(rax, field); | |
2681 break; | |
2682 case Bytecodes::_fast_bgetfield: | |
2683 __ movsbl(rax, field); | |
2684 break; | |
2685 case Bytecodes::_fast_sgetfield: | |
2686 __ load_signed_word(rax, field); | |
2687 break; | |
2688 case Bytecodes::_fast_cgetfield: | |
2689 __ load_unsigned_word(rax, field); | |
2690 break; | |
2691 case Bytecodes::_fast_fgetfield: | |
2692 __ movflt(xmm0, field); | |
2693 break; | |
2694 case Bytecodes::_fast_dgetfield: | |
2695 __ movdbl(xmm0, field); | |
2696 break; | |
2697 default: | |
2698 ShouldNotReachHere(); | |
2699 } | |
2700 // [jk] not needed currently | |
2701 // if (os::is_MP()) { | |
2702 // Label notVolatile; | |
2703 // __ testl(rdx, rdx); | |
2704 // __ jcc(Assembler::zero, notVolatile); | |
2705 // __ membar(Assembler::LoadLoad); | |
2706 // __ bind(notVolatile); | |
2707 //}; | |
2708 } | |
2709 | |
2710 void TemplateTable::fast_xaccess(TosState state) { | |
2711 transition(vtos, state); | |
2712 | |
2713 // get receiver | |
304 | 2714 __ movptr(rax, aaddress(0)); |
0 | 2715 debug_only(__ verify_local_tag(frame::TagReference, 0)); |
2716 // access constant pool cache | |
2717 __ get_cache_and_index_at_bcp(rcx, rdx, 2); | |
304 | 2718 __ movptr(rbx, |
2719 Address(rcx, rdx, Address::times_8, | |
2720 in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2721 ConstantPoolCacheEntry::f2_offset()))); | |
0 | 2722 // make sure exception is reported in correct bcp range (getfield is |
2723 // next instruction) | |
304 | 2724 __ increment(r13); |
0 | 2725 __ null_check(rax); |
2726 switch (state) { | |
2727 case itos: | |
2728 __ movl(rax, Address(rax, rbx, Address::times_1)); | |
2729 break; | |
2730 case atos: | |
113 | 2731 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1)); |
0 | 2732 __ verify_oop(rax); |
2733 break; | |
2734 case ftos: | |
2735 __ movflt(xmm0, Address(rax, rbx, Address::times_1)); | |
2736 break; | |
2737 default: | |
2738 ShouldNotReachHere(); | |
2739 } | |
2740 | |
2741 // [jk] not needed currently | |
2742 // if (os::is_MP()) { | |
2743 // Label notVolatile; | |
2744 // __ movl(rdx, Address(rcx, rdx, Address::times_8, | |
2745 // in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2746 // ConstantPoolCacheEntry::flags_offset()))); | |
2747 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2748 // __ testl(rdx, 0x1); | |
2749 // __ jcc(Assembler::zero, notVolatile); | |
2750 // __ membar(Assembler::LoadLoad); | |
2751 // __ bind(notVolatile); | |
2752 // } | |
2753 | |
304 | 2754 __ decrement(r13); |
0 | 2755 } |
2756 | |
2757 | |
2758 | |
2759 //----------------------------------------------------------------------------- | |
2760 // Calls | |
2761 | |
2762 void TemplateTable::count_calls(Register method, Register temp) { | |
2763 // implemented elsewhere | |
2764 ShouldNotReachHere(); | |
2765 } | |
2766 | |
2767 void TemplateTable::prepare_invoke(Register method, | |
2768 Register index, | |
2769 int byte_no, | |
2770 Bytecodes::Code code) { | |
2771 // determine flags | |
2772 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; | |
2773 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; | |
2774 const bool is_invokespecial = code == Bytecodes::_invokespecial; | |
2775 const bool load_receiver = code != Bytecodes::_invokestatic; | |
2776 const bool receiver_null_check = is_invokespecial; | |
2777 const bool save_flags = is_invokeinterface || is_invokevirtual; | |
2778 // setup registers & access constant pool cache | |
2779 const Register recv = rcx; | |
2780 const Register flags = rdx; | |
2781 assert_different_registers(method, index, recv, flags); | |
2782 | |
2783 // save 'interpreter return address' | |
2784 __ save_bcp(); | |
2785 | |
2786 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual); | |
2787 | |
2788 // load receiver if needed (note: no return address pushed yet) | |
2789 if (load_receiver) { | |
2790 __ movl(recv, flags); | |
2791 __ andl(recv, 0xFF); | |
2792 if (TaggedStackInterpreter) __ shll(recv, 1); // index*2 | |
304 | 2793 __ movptr(recv, Address(rsp, recv, Address::times_8, |
113 | 2794 -Interpreter::expr_offset_in_bytes(1))); |
0 | 2795 __ verify_oop(recv); |
2796 } | |
2797 | |
2798 // do null check if needed | |
2799 if (receiver_null_check) { | |
2800 __ null_check(recv); | |
2801 } | |
2802 | |
2803 if (save_flags) { | |
2804 __ movl(r13, flags); | |
2805 } | |
2806 | |
2807 // compute return type | |
2808 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2809 // Make sure we don't need to mask flags for tosBits after the above shift | |
2810 ConstantPoolCacheEntry::verify_tosBits(); | |
2811 // load return address | |
2812 { | |
2813 ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table()); | |
2814 ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table()); | |
2815 __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3)); | |
304 | 2816 __ movptr(flags, Address(rscratch1, flags, Address::times_8)); |
0 | 2817 } |
2818 | |
2819 // push return address | |
304 | 2820 __ push(flags); |
0 | 2821 |
2822 // Restore the flags field from the constant pool cache, and restore |
2823 // r13 (the bytecode pointer) for later null checks. |
2824 if (save_flags) { | |
2825 __ movl(flags, r13); | |
2826 __ restore_bcp(); | |
2827 } | |
2828 } | |
2829 | |
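In prepare_invoke the low byte of the flags is the parameter slot count, and the receiver sits that many slots down the expression stack. A simplified C++ picture of that addressing, ignoring the TaggedStackInterpreter case:

    #include <cstdint>
    #include <cstdio>

    // Receiver = deepest argument slot; the stack grows toward lower addresses,
    // so slot (param_slots - 1) above the top of stack is the receiver.
    static intptr_t* locate_receiver(intptr_t* sp, uint32_t flags) {
      uint32_t param_slots = flags & 0xFF;      // as in 'andl(recv, 0xFF)'
      return sp + (param_slots - 1);
    }

    int main() {
      // Fake expression stack for foo(receiver, int, int): three slots pushed,
      // receiver first, so it ends up deepest.
      intptr_t stack[3] = { /* arg2 */ 2, /* arg1 */ 1, /* receiver */ 0xBEEF };
      intptr_t* sp = stack;                      // top of stack at index 0
      std::printf("receiver slot = %#lx\n",
                  (unsigned long)*locate_receiver(sp, 3));
      return 0;
    }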
2830 | |
2831 void TemplateTable::invokevirtual_helper(Register index, | |
2832 Register recv, | |
2833 Register flags) { | |
2834 // Uses temporary registers rax, rdx |
2835 assert_different_registers(index, recv, rax, rdx); |
2836 // Test for an invoke of a final method | |
2837 Label notFinal; | |
2838 __ movl(rax, flags); | |
2839 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod)); | |
2840 __ jcc(Assembler::zero, notFinal); | |
2841 | |
2842 const Register method = index; // method must be rbx | |
2843 assert(method == rbx, | |
2844 "methodOop must be rbx for interpreter calling convention"); | |
2845 | |
2846 // do the call - the index is actually the method to call | |
2847 __ verify_oop(method); | |
2848 | |
2849 // It's final, need a null check here! | |
2850 __ null_check(recv); | |
2851 | |
2852 // profile this call | |
2853 __ profile_final_call(rax); | |
2854 | |
2855 __ jump_from_interpreted(method, rax); | |
2856 | |
2857 __ bind(notFinal); | |
2858 | |
2859 // get receiver klass | |
2860 __ null_check(recv, oopDesc::klass_offset_in_bytes()); | |
113 | 2861 __ load_klass(rax, recv); |
0 | 2862 |
2863 __ verify_oop(rax); | |
2864 | |
2865 // profile this call | |
2866 __ profile_virtual_call(rax, r14, rdx); | |
2867 | |
2868 // get target methodOop & entry point | |
2869 const int base = instanceKlass::vtable_start_offset() * wordSize; | |
2870 assert(vtableEntry::size() * wordSize == 8, | |
2871 "adjust the scaling in the code below"); | |
304 | 2872 __ movptr(method, Address(rax, index, |
113 | 2873 Address::times_8, |
2874 base + vtableEntry::method_offset_in_bytes())); |
304 | 2875 __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset())); |
0 | 2876 __ jump_from_interpreted(method, rdx); |
2877 } | |
2878 | |
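Stripped of the interpreter details, the non-final path of invokevirtual_helper is ordinary vtable dispatch: index into a table of method pointers hanging off the receiver's klass. An illustrative C++ model (layout and names are not the VM's):

    #include <cstdio>

    typedef void (*Method)(void);

    static void object_toString() { std::puts("Object.toString"); }
    static void string_toString() { std::puts("String.toString"); }

    struct Klass {
      Method vtable[4];      // stands in for the area at vtable_start_offset()
    };

    static void invokevirtual(const Klass* receiver_klass, int vtable_index) {
      Method m = receiver_klass->vtable[vtable_index];  // method = vtable[index]
      m();                                              // jump_from_interpreted
    }

    int main() {
      Klass object_klass = {{ object_toString }};
      Klass string_klass = {{ string_toString }};       // overriding subclass slot
      invokevirtual(&object_klass, 0);
      invokevirtual(&string_klass, 0);
      return 0;
    }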
2879 | |
2880 void TemplateTable::invokevirtual(int byte_no) { | |
2881 transition(vtos, vtos); | |
2882 prepare_invoke(rbx, noreg, byte_no, bytecode()); | |
2883 | |
2884 // rbx: index | |
2885 // rcx: receiver | |
2886 // rdx: flags | |
2887 | |
2888 invokevirtual_helper(rbx, rcx, rdx); | |
2889 } | |
2890 | |
2891 | |
2892 void TemplateTable::invokespecial(int byte_no) { | |
2893 transition(vtos, vtos); | |
2894 prepare_invoke(rbx, noreg, byte_no, bytecode()); | |
2895 // do the call | |
2896 __ verify_oop(rbx); | |
2897 __ profile_call(rax); | |
2898 __ jump_from_interpreted(rbx, rax); | |
2899 } | |
2900 | |
2901 | |
2902 void TemplateTable::invokestatic(int byte_no) { | |
2903 transition(vtos, vtos); | |
2904 prepare_invoke(rbx, noreg, byte_no, bytecode()); | |
2905 // do the call | |
2906 __ verify_oop(rbx); | |
2907 __ profile_call(rax); | |
2908 __ jump_from_interpreted(rbx, rax); | |
2909 } | |
2910 | |
2911 void TemplateTable::fast_invokevfinal(int byte_no) { | |
2912 transition(vtos, vtos); | |
2913 __ stop("fast_invokevfinal not used on amd64"); | |
2914 } | |
2915 | |
2916 void TemplateTable::invokeinterface(int byte_no) { | |
2917 transition(vtos, vtos); | |
2918 prepare_invoke(rax, rbx, byte_no, bytecode()); | |
2919 | |
2920 // rax: Interface | |
2921 // rbx: index | |
2922 // rcx: receiver | |
2923 // rdx: flags | |
2924 | |
2925 // Special case of invokeinterface called for virtual method of | |
2926 // java.lang.Object. See cpCacheOop.cpp for details. | |
2927 // This code isn't produced by javac, but could be produced by | |
2928 // another compliant java compiler. | |
2929 Label notMethod; | |
2930 __ movl(r14, rdx); | |
2931 __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface)); | |
2932 __ jcc(Assembler::zero, notMethod); | |
2933 | |
2934 invokevirtual_helper(rbx, rcx, rdx); | |
2935 __ bind(notMethod); | |
2936 | |
2937 // Get receiver klass into rdx - also a null check | |
2938 __ restore_locals(); // restore r14 | |
113 | 2939 __ load_klass(rdx, rcx); |
0 | 2940 __ verify_oop(rdx); |
2941 | |
2942 // profile this call | |
2943 __ profile_virtual_call(rdx, r13, r14); | |
2944 | |
304 | 2945 __ mov(r14, rdx); // Save klassOop in r14 |
0 | 2946 |
2947 // Compute start of first itableOffsetEntry (which is at the end of | |
2948 // the vtable) | |
2949 const int base = instanceKlass::vtable_start_offset() * wordSize; | |
2950 // Get length of vtable | |
2951 assert(vtableEntry::size() * wordSize == 8, | |
2952 "adjust the scaling in the code below"); | |
2953 __ movl(r13, Address(rdx, | |
2954 instanceKlass::vtable_length_offset() * wordSize)); | |
304 | 2955 __ lea(rdx, Address(rdx, r13, Address::times_8, base)); |
0 | 2956 |
2957 if (HeapWordsPerLong > 1) { | |
2958 // Round up to align_object_offset boundary | |
304 | 2959 __ round_to(rdx, BytesPerLong); |
0 | 2960 } |
2961 | |
2962 Label entry, search, interface_ok; | |
2963 | |
2964 __ jmpb(entry); | |
2965 __ bind(search); | |
304 | 2966 __ addptr(rdx, itableOffsetEntry::size() * wordSize); |
0 | 2967 |
2968 __ bind(entry); | |
2969 | |
2970 // Check that the entry is non-null. A null entry means that the | |
2971 // receiver class doesn't implement the interface, and wasn't the | |
2972 // same as the receiver class checked when the interface was | |
2973 // resolved. | |
304 | 2974 __ push(rdx); |
2975 __ movptr(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); | |
2976 __ testptr(rdx, rdx); | |
0 | 2977 __ jcc(Assembler::notZero, interface_ok); |
2978 // throw exception | |
304 | 2979 __ pop(rdx); // pop saved register first. |
2980 __ pop(rbx); // pop return address (pushed by prepare_invoke) | |
0 | 2981 __ restore_bcp(); // r13 must be correct for exception handler (was |
2982 // destroyed) | |
2983 __ restore_locals(); // make sure locals pointer is correct as well | |
2984 // (was destroyed) | |
2985 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
2986 InterpreterRuntime::throw_IncompatibleClassChangeError)); | |
2987 // the call_VM checks for exception, so we should never return here. | |
2988 __ should_not_reach_here(); | |
2989 __ bind(interface_ok); | |
2990 | |
304 | 2991 __ pop(rdx); |
2992 | |
2993 __ cmpptr(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); | |
0 | 2994 __ jcc(Assembler::notEqual, search); |
2995 | |
2996 __ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes())); | |
2997 | |
304 | 2998 __ addptr(rdx, r14); // Add offset to klassOop |
0 | 2999 assert(itableMethodEntry::size() * wordSize == 8, |
3000 "adjust the scaling in the code below"); | |
304 | 3001 __ movptr(rbx, Address(rdx, rbx, Address::times_8)); |
0 | 3002 // rbx: methodOop to call |
3003 // rcx: receiver | |
3004 // Check for abstract method error | |
3005 // Note: This should be done more efficiently via a | |
3006 // throw_abstract_method_error interpreter entry point and a | |
3007 // conditional jump to it in case of a null method. | |
3008 { | |
3009 Label L; | |
304 | 3010 __ testptr(rbx, rbx); |
0 | 3011 __ jcc(Assembler::notZero, L); |
3012 // throw exception | |
3013 // note: must restore interpreter registers to canonical | |
3014 // state for exception handling to work correctly! | |
304 | 3015 __ pop(rbx); // pop return address (pushed by prepare_invoke) |
0 | 3016 __ restore_bcp(); // r13 must be correct for exception handler |
3017 // (was destroyed) | |
3018 __ restore_locals(); // make sure locals pointer is correct as | |
3019 // well (was destroyed) | |
3020 __ call_VM(noreg, | |
3021 CAST_FROM_FN_PTR(address, | |
3022 InterpreterRuntime::throw_AbstractMethodError)); | |
3023 // the call_VM checks for exception, so we should never return here. | |
3024 __ should_not_reach_here(); | |
3025 __ bind(L); | |
3026 } | |
3027 | |
304 | 3028 __ movptr(rcx, Address(rbx, methodOopDesc::interpreter_entry_offset())); |
0 | 3029 |
3030 // do the call | |
3031 // rcx: receiver | |
3032 // rbx: methodOop | |
3033 __ jump_from_interpreted(rbx, rdx); | |
3034 } | |
3035 | |
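The search loop above walks the itable's (interface, offset) entries until it finds the resolved interface or hits a null terminator, then indexes that interface's method table. A rough C++ model with invented structures (the real entries live inside the klass object):

    #include <cstddef>
    #include <cstdio>

    typedef void (*Method)(void);

    struct ItableOffsetEntry {
      const void* interface_klass;      // NULL marks the end of the list
      size_t      method_table_offset;  // byte offset of that interface's methods
    };

    static Method lookup_interface_method(const char*              klass_base,
                                          const ItableOffsetEntry* entries,
                                          const void*              wanted,
                                          int                      itable_index) {
      for (const ItableOffsetEntry* e = entries; ; ++e) {
        if (e->interface_klass == NULL) return NULL;  // IncompatibleClassChangeError
        if (e->interface_klass == wanted) {
          const Method* methods =
              (const Method*)(klass_base + e->method_table_offset);
          return methods[itable_index];               // NULL -> AbstractMethodError
        }
      }
    }

    static void impl_run() { std::puts("MyRunnable.run"); }

    int main() {
      static const int runnable_interface = 0;         // fake interface identity
      static Method method_table[1] = { impl_run };    // per-interface methods
      ItableOffsetEntry entries[2] = {
        { &runnable_interface, 0 },                    // methods at offset 0 here
        { NULL, 0 }
      };
      Method m = lookup_interface_method((const char*)method_table, entries,
                                         &runnable_interface, 0);
      if (m != NULL) m();
      return 0;
    }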
3036 //----------------------------------------------------------------------------- | |
3037 // Allocation | |
3038 | |
3039 void TemplateTable::_new() { | |
3040 transition(vtos, atos); | |
3041 __ get_unsigned_2_byte_index_at_bcp(rdx, 1); | |
3042 Label slow_case; | |
3043 Label done; | |
3044 Label initialize_header; | |
3045 Label initialize_object; // including clearing the fields | |
3046 Label allocate_shared; | |
3047 ExternalAddress top((address)Universe::heap()->top_addr()); | |
3048 ExternalAddress end((address)Universe::heap()->end_addr()); | |
3049 | |
3050 __ get_cpool_and_tags(rsi, rax); | |
3051 // get instanceKlass | |
304 | 3052 __ movptr(rsi, Address(rsi, rdx, |
3053 Address::times_8, sizeof(constantPoolOopDesc))); | |
0 | 3054 |
3055 // make sure the class we're about to instantiate has been | |
3056 // resolved. Note: slow_case does a pop of stack, which is why we | |
3057 // loaded class/pushed above | |
3058 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; | |
3059 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), | |
3060 JVM_CONSTANT_Class); | |
3061 __ jcc(Assembler::notEqual, slow_case); | |
3062 | |
3063 // make sure klass is initialized & doesn't have finalizer | |
3064 // make sure klass is fully initialized | |
3065 __ cmpl(Address(rsi, | |
3066 instanceKlass::init_state_offset_in_bytes() + | |
3067 sizeof(oopDesc)), | |
3068 instanceKlass::fully_initialized); | |
3069 __ jcc(Assembler::notEqual, slow_case); | |
3070 | |
3071 // get instance_size in instanceKlass (scaled to a count of bytes) | |
3072 __ movl(rdx, | |
3073 Address(rsi, | |
3074 Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); | |
3075 // test to see if it has a finalizer or is malformed in some way | |
3076 __ testl(rdx, Klass::_lh_instance_slow_path_bit); | |
3077 __ jcc(Assembler::notZero, slow_case); | |
3078 | |
3079 // Allocate the instance | |
3080 // 1) Try to allocate in the TLAB | |
3081 // 2) if fail and the object is large allocate in the shared Eden | |
3082 // 3) if the above fails (or is not applicable), go to a slow case | |
3083 // (creates a new TLAB, etc.) | |
3084 | |
3085 const bool allow_shared_alloc = | |
3086 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; | |
3087 | |
3088 if (UseTLAB) { | |
304 | 3089 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); |
3090 __ lea(rbx, Address(rax, rdx, Address::times_1)); | |
3091 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); | |
0 | 3092 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case); |
304 | 3093 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx); |
0 | 3094 if (ZeroTLAB) { |
3095 // the fields have been already cleared | |
3096 __ jmp(initialize_header); | |
3097 } else { | |
3098 // initialize both the header and fields | |
3099 __ jmp(initialize_object); | |
3100 } | |
3101 } | |
3102 | |
3103 // Allocation in the shared Eden, if allowed. | |
3104 // | |
3105 // rdx: instance size in bytes | |
3106 if (allow_shared_alloc) { | |
3107 __ bind(allocate_shared); | |
3108 | |
3109 const Register RtopAddr = rscratch1; | |
3110 const Register RendAddr = rscratch2; | |
3111 | |
3112 __ lea(RtopAddr, top); | |
3113 __ lea(RendAddr, end); | |
304 | 3114 __ movptr(rax, Address(RtopAddr, 0)); |
0 | 3115 |
3116 // For retries rax gets set by cmpxchgq | |
3117 Label retry; | |
3118 __ bind(retry); | |
304 | 3119 __ lea(rbx, Address(rax, rdx, Address::times_1)); |
3120 __ cmpptr(rbx, Address(RendAddr, 0)); | |
0 | 3121 __ jcc(Assembler::above, slow_case); |
3122 | |
 3124 // Compare rax with the current top addr; if they are still equal, store the |
 3125 // new top addr (held in rbx) at the address of the top addr pointer. Sets ZF |
 3126 // if they were equal, and clears it otherwise. Use lock prefix for atomicity on MPs. |
3126 // | |
3127 // rax: object begin | |
3128 // rbx: object end | |
3129 // rdx: instance size in bytes | |
3130 if (os::is_MP()) { | |
3131 __ lock(); | |
3132 } | |
304 | 3133 __ cmpxchgptr(rbx, Address(RtopAddr, 0)); |
0 | 3134 |
3135 // if someone beat us on the allocation, try again, otherwise continue | |
3136 __ jcc(Assembler::notEqual, retry); | |
3137 } | |
3138 | |
3139 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { | |
3140 // The object is initialized before the header. If the object size is | |
3141 // zero, go directly to the header initialization. | |
3142 __ bind(initialize_object); | |
3143 __ decrementl(rdx, sizeof(oopDesc)); | |
3144 __ jcc(Assembler::zero, initialize_header); | |
3145 | |
3146 // Initialize object fields | |
3147 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) | |
3148 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop | |
3149 { | |
3150 Label loop; | |
3151 __ bind(loop); | |
3152 __ movq(Address(rax, rdx, Address::times_8, | |
3153 sizeof(oopDesc) - oopSize), | |
3154 rcx); | |
3155 __ decrementl(rdx); | |
3156 __ jcc(Assembler::notZero, loop); | |
3157 } | |
3158 | |
3159 // initialize object header only. | |
3160 __ bind(initialize_header); | |
3161 if (UseBiasedLocking) { | |
304 | 3162 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); |
3163 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); | |
0 | 3164 } else { |
3165 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), | |
3166 (intptr_t) markOopDesc::prototype()); // header (address 0x1) | |
3167 } | |
167 | 3168 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) |
 3169 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops |
 3170 __ store_klass(rax, rsi); // store klass last |
0 | 3171 __ jmp(done); |
3172 } | |
3173 | |
3174 { | |
3175 SkipIfEqual skip(_masm, &DTraceAllocProbes, false); | |
3176 // Trigger dtrace event for fastpath | |
3177 __ push(atos); // save the return value | |
3178 __ call_VM_leaf( | |
3179 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax); | |
3180 __ pop(atos); // restore the return value | |
3181 } | |
3182 | |
3183 // slow case | |
3184 __ bind(slow_case); | |
3185 __ get_constant_pool(c_rarg1); | |
3186 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); | |
3187 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2); | |
3188 __ verify_oop(rax); | |
3189 | |
3190 // continue | |
3191 __ bind(done); | |
3192 } | |
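// ---------------------------------------------------------------------------
// Illustrative sketch (assumed, not from the original source): a minimal C++
// model of the allocation fast path generated in _new() above. Thread, Heap
// and allocate() are hypothetical stand-ins; std::atomic models the lock
// cmpxchgq performed on the shared Eden top pointer.
#include <atomic>
#include <cstddef>

struct Thread { char* tlab_top; char* tlab_end; };
struct Heap   { std::atomic<char*> top; char* end; };

static char* allocate(Thread* self, Heap* heap, std::size_t size_in_bytes) {
  // 1) TLAB bump-pointer allocation: the TLAB is thread-local, no atomics needed
  char* obj = self->tlab_top;
  if (obj + size_in_bytes <= self->tlab_end) {
    self->tlab_top = obj + size_in_bytes;
    return obj;                                  // fields are zeroed and the header set next
  }
  // 2) shared Eden: CAS-retry loop on the global top pointer
  char* old_top = heap->top.load();
  for (;;) {
    char* new_top = old_top + size_in_bytes;
    if (new_top > heap->end) return nullptr;     // 3) slow path (new TLAB, GC, ...)
    if (heap->top.compare_exchange_strong(old_top, new_top)) {
      return old_top;                            // won the race; old_top is the object start
    }
    // lost the race: old_top was refreshed by compare_exchange_strong, retry
  }
}
// ---------------------------------------------------------------------------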
3193 | |
3194 void TemplateTable::newarray() { | |
3195 transition(itos, atos); | |
3196 __ load_unsigned_byte(c_rarg1, at_bcp(1)); | |
3197 __ movl(c_rarg2, rax); | |
3198 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), | |
3199 c_rarg1, c_rarg2); | |
3200 } | |
3201 | |
3202 void TemplateTable::anewarray() { | |
3203 transition(itos, atos); | |
3204 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); | |
3205 __ get_constant_pool(c_rarg1); | |
3206 __ movl(c_rarg3, rax); | |
3207 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), | |
3208 c_rarg1, c_rarg2, c_rarg3); | |
3209 } | |
3210 | |
3211 void TemplateTable::arraylength() { | |
3212 transition(atos, itos); | |
3213 __ null_check(rax, arrayOopDesc::length_offset_in_bytes()); | |
3214 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes())); | |
3215 } | |
3216 | |
3217 void TemplateTable::checkcast() { | |
3218 transition(atos, atos); | |
3219 Label done, is_null, ok_is_subtype, quicked, resolved; | |
304 | 3220 __ testptr(rax, rax); // object is in rax |
0 | 3221 __ jcc(Assembler::zero, is_null); |
3222 | |
3223 // Get cpool & tags index | |
3224 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array | |
3225 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index | |
3226 // See if bytecode has already been quicked | |
3227 __ cmpb(Address(rdx, rbx, | |
3228 Address::times_1, | |
3229 typeArrayOopDesc::header_size(T_BYTE) * wordSize), | |
3230 JVM_CONSTANT_Class); | |
3231 __ jcc(Assembler::equal, quicked); | |
113 | 3232 __ push(atos); // save receiver for result, and for GC |
304 | 3233 __ mov(r12, rcx); // save rcx XXX |
0 | 3234 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); |
113 | 3235 __ movq(rcx, r12); // restore rcx XXX |
 3236 __ reinit_heapbase(); |
0 | 3237 __ pop_ptr(rdx); // restore receiver |
3238 __ jmpb(resolved); | |
3239 | |
3240 // Get superklass in rax and subklass in rbx | |
3241 __ bind(quicked); | |
304 | 3242 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check |
3243 __ movptr(rax, Address(rcx, rbx, | |
0 | 3244 Address::times_8, sizeof(constantPoolOopDesc))); |
3245 | |
3246 __ bind(resolved); | |
113 | 3247 __ load_klass(rbx, rdx); |
0 | 3248 |
3249 // Generate subtype check. Blows rcx, rdi. Object in rdx. | |
3250 // Superklass in rax. Subklass in rbx. | |
3251 __ gen_subtype_check(rbx, ok_is_subtype); | |
3252 | |
3253 // Come here on failure | |
3254 __ push_ptr(rdx); | |
3255 // object is at TOS | |
3256 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); | |
3257 | |
3258 // Come here on success | |
3259 __ bind(ok_is_subtype); | |
304 | 3260 __ mov(rax, rdx); // Restore object in rdx |
0 | 3261 |
3262 // Collect counts on whether this check-cast sees NULLs a lot or not. | |
3263 if (ProfileInterpreter) { | |
3264 __ jmp(done); | |
3265 __ bind(is_null); | |
3266 __ profile_null_seen(rcx); | |
3267 } else { | |
3268 __ bind(is_null); // same as 'done' | |
3269 } | |
3270 __ bind(done); | |
3271 } | |
3272 | |
3273 void TemplateTable::instanceof() { | |
3274 transition(atos, itos); | |
3275 Label done, is_null, ok_is_subtype, quicked, resolved; | |
304 | 3276 __ testptr(rax, rax); |
0 | 3277 __ jcc(Assembler::zero, is_null); |
3278 | |
3279 // Get cpool & tags index | |
3280 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array | |
3281 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index | |
3282 // See if bytecode has already been quicked | |
3283 __ cmpb(Address(rdx, rbx, | |
3284 Address::times_1, | |
3285 typeArrayOopDesc::header_size(T_BYTE) * wordSize), | |
3286 JVM_CONSTANT_Class); | |
3287 __ jcc(Assembler::equal, quicked); | |
3288 | |
113 | 3289 __ push(atos); // save receiver for result, and for GC |
304 | 3290 __ mov(r12, rcx); // save rcx |
0 | 3291 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); |
113 | 3292 __ movq(rcx, r12); // restore rcx |
 3293 __ reinit_heapbase(); |
0 | 3294 __ pop_ptr(rdx); // restore receiver |
113 | 3295 __ load_klass(rdx, rdx); |
0 | 3296 __ jmpb(resolved); |
3297 | |
3298 // Get superklass in rax and subklass in rdx | |
3299 __ bind(quicked); | |
113 | 3300 __ load_klass(rdx, rax); |
304 | 3301 __ movptr(rax, Address(rcx, rbx, |
3302 Address::times_8, sizeof(constantPoolOopDesc))); | |
0 | 3303 |
3304 __ bind(resolved); | |
3305 | |
3306 // Generate subtype check. Blows rcx, rdi | |
3307 // Superklass in rax. Subklass in rdx. | |
3308 __ gen_subtype_check(rdx, ok_is_subtype); | |
3309 | |
3310 // Come here on failure | |
3311 __ xorl(rax, rax); | |
3312 __ jmpb(done); | |
3313 // Come here on success | |
3314 __ bind(ok_is_subtype); | |
3315 __ movl(rax, 1); | |
3316 | |
3317 // Collect counts on whether this test sees NULLs a lot or not. | |
3318 if (ProfileInterpreter) { | |
3319 __ jmp(done); | |
3320 __ bind(is_null); | |
3321 __ profile_null_seen(rcx); | |
3322 } else { | |
3323 __ bind(is_null); // same as 'done' | |
3324 } | |
3325 __ bind(done); | |
3326 // rax = 0: obj == NULL or obj is not an instanceof the specified klass | |
3327 // rax = 1: obj != NULL and obj is an instanceof the specified klass | |
3328 } | |
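// ---------------------------------------------------------------------------
// Illustrative sketch (assumed, not from the original source): the semantics
// that checkcast() and instanceof() above implement, in plain C++. Klass/oop
// are stand-in types and is_subtype_of() models gen_subtype_check(); a single
// inheritance chain is enough for the illustration.
struct Klass { const Klass* super; };
struct oop   { const Klass* klass; };

static bool is_subtype_of(const Klass* k, const Klass* s) {
  for (; k != nullptr; k = k->super) {
    if (k == s) return true;
  }
  return false;
}

// checkcast: a null reference always passes; a failing non-null reference throws
static oop* do_checkcast(oop* obj, const Klass* super) {
  if (obj != nullptr && !is_subtype_of(obj->klass, super)) {
    throw "ClassCastException";                  // the jump to _throw_ClassCastException_entry
  }
  return obj;                                    // the object stays the top-of-stack value (atos)
}

// instanceof: null yields 0, otherwise 1 iff the subtype check succeeds
static int do_instanceof(const oop* obj, const Klass* super) {
  return (obj != nullptr && is_subtype_of(obj->klass, super)) ? 1 : 0;
}
// ---------------------------------------------------------------------------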
3329 | |
3330 //----------------------------------------------------------------------------- | |
3331 // Breakpoints | |
3332 void TemplateTable::_breakpoint() { | |
 3333 // Note: We get here even if we are single stepping; |
 3334 // jbug insists on setting breakpoints at every bytecode |
3335 // even if we are in single step mode. | |
3336 | |
3337 transition(vtos, vtos); | |
3338 | |
3339 // get the unpatched byte code | |
3340 __ get_method(c_rarg1); | |
3341 __ call_VM(noreg, | |
3342 CAST_FROM_FN_PTR(address, | |
3343 InterpreterRuntime::get_original_bytecode_at), | |
3344 c_rarg1, r13); | |
304 | 3345 __ mov(rbx, rax); |
0 | 3346 |
3347 // post the breakpoint event | |
3348 __ get_method(c_rarg1); | |
3349 __ call_VM(noreg, | |
3350 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), | |
3351 c_rarg1, r13); | |
3352 | |
3353 // complete the execution of original bytecode | |
3354 __ dispatch_only_normal(vtos); | |
3355 } | |
3356 | |
3357 //----------------------------------------------------------------------------- | |
3358 // Exceptions | |
3359 | |
3360 void TemplateTable::athrow() { | |
3361 transition(atos, vtos); | |
3362 __ null_check(rax); | |
3363 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); | |
3364 } | |
3365 | |
3366 //----------------------------------------------------------------------------- | |
3367 // Synchronization | |
3368 // | |
3369 // Note: monitorenter & exit are symmetric routines; which is reflected | |
3370 // in the assembly code structure as well | |
3371 // | |
3372 // Stack layout: | |
3373 // | |
3374 // [expressions ] <--- rsp = expression stack top | |
3375 // .. | |
3376 // [expressions ] | |
3377 // [monitor entry] <--- monitor block top = expression stack bot | |
3378 // .. | |
3379 // [monitor entry] | |
3380 // [frame data ] <--- monitor block bot | |
3381 // ... | |
3382 // [saved rbp ] <--- rbp | |
3383 void TemplateTable::monitorenter() { | |
3384 transition(atos, vtos); | |
3385 | |
3386 // check for NULL object | |
3387 __ null_check(rax); | |
3388 | |
3389 const Address monitor_block_top( | |
3390 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); | |
3391 const Address monitor_block_bot( | |
3392 rbp, frame::interpreter_frame_initial_sp_offset * wordSize); | |
3393 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
3394 | |
3395 Label allocated; | |
3396 | |
3397 // initialize entry pointer | |
3398 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL | |
3399 | |
3400 // find a free slot in the monitor block (result in c_rarg1) | |
3401 { | |
3402 Label entry, loop, exit; | |
304 | 3403 __ movptr(c_rarg3, monitor_block_top); // points to current entry, |
0 | 3404 // starting with top-most entry |
304 | 3405 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom |
0 | 3406 // of monitor block |
3407 __ jmpb(entry); | |
3408 | |
3409 __ bind(loop); | |
3410 // check if current entry is used | |
304 | 3411 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); |
0 | 3412 // if not used then remember entry in c_rarg1 |
304 | 3413 __ cmov(Assembler::equal, c_rarg1, c_rarg3); |
0 | 3414 // check if current entry is for same object |
304 | 3415 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); |
0 | 3416 // if same object then stop searching |
3417 __ jccb(Assembler::equal, exit); | |
3418 // otherwise advance to next entry | |
304 | 3419 __ addptr(c_rarg3, entry_size); |
0 | 3420 __ bind(entry); |
3421 // check if bottom reached | |
304 | 3422 __ cmpptr(c_rarg3, c_rarg2); |
0 | 3423 // if not at bottom then check this entry |
3424 __ jcc(Assembler::notEqual, loop); | |
3425 __ bind(exit); | |
3426 } | |
3427 | |
304 | 3428 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found |
0 | 3429 __ jcc(Assembler::notZero, allocated); // if found, continue with that one |
3430 | |
3431 // allocate one if there's no free slot | |
3432 { | |
3433 Label entry, loop; | |
304 | 3434 // 1. compute new pointers // rsp: old expression stack top |
3435 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom | |
3436 __ subptr(rsp, entry_size); // move expression stack top | |
3437 __ subptr(c_rarg1, entry_size); // move expression stack bottom | |
3438 __ mov(c_rarg3, rsp); // set start value for copy loop | |
3439 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom | |
0 | 3440 __ jmp(entry); |
3441 // 2. move expression stack contents | |
3442 __ bind(loop); | |
304 | 3443 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack |
3444 // word from old location | |
3445 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location | |
3446 __ addptr(c_rarg3, wordSize); // advance to next word | |
0 | 3447 __ bind(entry); |
304 | 3448 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached |
0 | 3449 __ jcc(Assembler::notEqual, loop); // if not at bottom then |
3450 // copy next word | |
3451 } | |
3452 | |
3453 // call run-time routine | |
3454 // c_rarg1: points to monitor entry | |
3455 __ bind(allocated); | |
3456 | |
3457 // Increment bcp to point to the next bytecode, so exception | |
 3458 // handling for async. exceptions works correctly. |
 3459 // The object has already been popped from the stack, so the |
3460 // expression stack looks correct. | |
304 | 3461 __ increment(r13); |
0 | 3462 |
3463 // store object | |
304 | 3464 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax); |
0 | 3465 __ lock_object(c_rarg1); |
3466 | |
3467 // check to make sure this monitor doesn't cause stack overflow after locking | |
3468 __ save_bcp(); // in case of exception | |
3469 __ generate_stack_overflow_check(0); | |
3470 | |
3471 // The bcp has already been incremented. Just need to dispatch to | |
3472 // next instruction. | |
3473 __ dispatch_next(vtos); | |
3474 } | |
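// ---------------------------------------------------------------------------
// Illustrative sketch (assumed, not from the original source): the free-slot
// search performed by monitorenter() above, in plain C++. MonitorEntry is a
// hypothetical stand-in for one BasicObjectLock slot in the monitor block.
struct MonitorEntry { void* obj; /* + lock word */ };

// Scan from the top-most entry down to the bottom of the monitor block.
static MonitorEntry* find_free_slot(MonitorEntry* top, MonitorEntry* bottom, void* obj) {
  MonitorEntry* free_slot = nullptr;
  for (MonitorEntry* e = top; e != bottom; ++e) {
    if (e->obj == nullptr) free_slot = e;        // remember the most recent unused entry
    if (e->obj == obj)     break;                // same object already locked: stop searching
  }
  return free_slot;                              // nullptr => grow the block and shift the stack
}
// ---------------------------------------------------------------------------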
3475 | |
3476 | |
3477 void TemplateTable::monitorexit() { | |
3478 transition(atos, vtos); | |
3479 | |
3480 // check for NULL object | |
3481 __ null_check(rax); | |
3482 | |
3483 const Address monitor_block_top( | |
3484 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); | |
3485 const Address monitor_block_bot( | |
3486 rbp, frame::interpreter_frame_initial_sp_offset * wordSize); | |
3487 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
3488 | |
3489 Label found; | |
3490 | |
3491 // find matching slot | |
3492 { | |
3493 Label entry, loop; | |
304 | 3494 __ movptr(c_rarg1, monitor_block_top); // points to current entry, |
0 | 3495 // starting with top-most entry |
304 | 3496 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom |
0 | 3497 // of monitor block |
3498 __ jmpb(entry); | |
3499 | |
3500 __ bind(loop); | |
3501 // check if current entry is for same object | |
304 | 3502 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); |
0 | 3503 // if same object then stop searching |
3504 __ jcc(Assembler::equal, found); | |
3505 // otherwise advance to next entry | |
304 | 3506 __ addptr(c_rarg1, entry_size); |
0 | 3507 __ bind(entry); |
3508 // check if bottom reached | |
304 | 3509 __ cmpptr(c_rarg1, c_rarg2); |
0 | 3510 // if not at bottom then check this entry |
3511 __ jcc(Assembler::notEqual, loop); | |
3512 } | |
3513 | |
3514 // error handling. Unlocking was not block-structured | |
3515 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
3516 InterpreterRuntime::throw_illegal_monitor_state_exception)); | |
3517 __ should_not_reach_here(); | |
3518 | |
3519 // call run-time routine | |
 3520 // c_rarg1: points to monitor entry |
3521 __ bind(found); | |
3522 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) | |
3523 __ unlock_object(c_rarg1); | |
3524 __ pop_ptr(rax); // discard object | |
3525 } | |
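// ---------------------------------------------------------------------------
// Illustrative sketch (assumed, not from the original source): the matching-
// slot search performed by monitorexit() above, reusing the hypothetical
// MonitorEntry stand-in from the sketch after monitorenter().
static MonitorEntry* find_matching_slot(MonitorEntry* top, MonitorEntry* bottom, void* obj) {
  for (MonitorEntry* e = top; e != bottom; ++e) {
    if (e->obj == obj) return e;                 // found the entry locking this object
  }
  return nullptr;                                // no entry: unlocking was not block-structured
}
// ---------------------------------------------------------------------------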
3526 | |
3527 | |
3528 // Wide instructions | |
3529 void TemplateTable::wide() { | |
3530 transition(vtos, vtos); | |
3531 __ load_unsigned_byte(rbx, at_bcp(1)); | |
3532 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point)); | |
3533 __ jmp(Address(rscratch1, rbx, Address::times_8)); | |
3534 // Note: the r13 increment step is part of the individual wide | |
3535 // bytecode implementations | |
3536 } | |
3537 | |
3538 | |
3539 // Multi arrays | |
3540 void TemplateTable::multianewarray() { | |
3541 transition(vtos, atos); | |
3542 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions | |
3543 // last dim is on top of stack; we want address of first one: | |
3544 // first_addr = last_addr + (ndims - 1) * wordSize | |
3545 if (TaggedStackInterpreter) __ shll(rax, 1); // index*2 | |
304 | 3546 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize)); |
0 | 3547 call_VM(rax, |
3548 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), | |
3549 c_rarg1); | |
3550 __ load_unsigned_byte(rbx, at_bcp(3)); | |
3551 if (TaggedStackInterpreter) __ shll(rbx, 1); // index*2 | |
304 | 3552 __ lea(rsp, Address(rsp, rbx, Address::times_8)); |
0 | 3553 } |
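// ---------------------------------------------------------------------------
// Illustrative sketch (assumed, not from the original source): the address
// arithmetic behind the lea above. With the last dimension on top of the
// expression stack, the first dimension lives (ndims - 1) one-word slots
// higher; first_dim_addr() is a hypothetical helper name.
#include <cstdint>

static intptr_t* first_dim_addr(intptr_t* stack_top, int ndims) {
  // each dimension occupies one word; stack_top points at the last dimension
  return stack_top + (ndims - 1);
}
// ---------------------------------------------------------------------------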
304 | 3554 #endif // !CC_INTERP |