Mercurial > hg > graal-compiler
annotate src/cpu/x86/vm/templateTable_x86_64.cpp @ 2007:5ddfcf4b079e
7003554: (tiered) assert(is_null_object() || handle() != NULL) failed: cannot embed null pointer
Summary: C1 with profiling doesn't check whether the MDO has been really allocated, which can silently fail if the perm gen is full. The solution is to check if the allocation failed and bailout out of inlining or compilation.
Reviewed-by: kvn, never
author | iveresov |
---|---|
date | Thu, 02 Dec 2010 17:21:12 -0800 |
parents | f95d63e2154a |
children | 0fc262af204f |
rev | line source |
---|---|
0 | 1 /* |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1506
diff
changeset
|
2 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. |
0 | 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 * | |
5 * This code is free software; you can redistribute it and/or modify it | |
6 * under the terms of the GNU General Public License version 2 only, as | |
7 * published by the Free Software Foundation. | |
8 * | |
9 * This code is distributed in the hope that it will be useful, but WITHOUT | |
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
12 * version 2 for more details (a copy is included in the LICENSE file that | |
13 * accompanied this code). | |
14 * | |
15 * You should have received a copy of the GNU General Public License version | |
16 * 2 along with this work; if not, write to the Free Software Foundation, | |
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
18 * | |
1552
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1506
diff
changeset
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1506
diff
changeset
|
20 * or visit www.oracle.com if you need additional information or have any |
c18cbe5936b8
6941466: Oracle rebranding changes for Hotspot repositories
trims
parents:
1506
diff
changeset
|
21 * questions. |
0 | 22 * |
23 */ | |
24 | |
1972 | 25 #include "precompiled.hpp" |
26 #include "interpreter/interpreter.hpp" | |
27 #include "interpreter/interpreterRuntime.hpp" | |
28 #include "interpreter/templateTable.hpp" | |
29 #include "memory/universe.inline.hpp" | |
30 #include "oops/methodDataOop.hpp" | |
31 #include "oops/objArrayKlass.hpp" | |
32 #include "oops/oop.inline.hpp" | |
33 #include "prims/methodHandles.hpp" | |
34 #include "runtime/sharedRuntime.hpp" | |
35 #include "runtime/stubRoutines.hpp" | |
36 #include "runtime/synchronizer.hpp" | |
0 | 37 |
304 | 38 #ifndef CC_INTERP |
39 | |
0 | 40 #define __ _masm-> |
41 | |
42 // Platform-dependent initialization | |
43 | |
44 void TemplateTable::pd_initialize() { | |
45 // No amd64 specific initialization | |
46 } | |
47 | |
48 // Address computation: local variables | |
49 | |
50 static inline Address iaddress(int n) { | |
51 return Address(r14, Interpreter::local_offset_in_bytes(n)); | |
52 } | |
53 | |
54 static inline Address laddress(int n) { | |
55 return iaddress(n + 1); | |
56 } | |
57 | |
58 static inline Address faddress(int n) { | |
59 return iaddress(n); | |
60 } | |
61 | |
62 static inline Address daddress(int n) { | |
63 return laddress(n); | |
64 } | |
65 | |
66 static inline Address aaddress(int n) { | |
67 return iaddress(n); | |
68 } | |
69 | |
70 static inline Address iaddress(Register r) { | |
1506 | 71 return Address(r14, r, Address::times_8); |
0 | 72 } |
73 | |
74 static inline Address laddress(Register r) { | |
75 return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1)); | |
76 } | |
77 | |
78 static inline Address faddress(Register r) { | |
79 return iaddress(r); | |
80 } | |
81 | |
82 static inline Address daddress(Register r) { | |
83 return laddress(r); | |
84 } | |
85 | |
86 static inline Address aaddress(Register r) { | |
87 return iaddress(r); | |
88 } | |
89 | |
90 static inline Address at_rsp() { | |
91 return Address(rsp, 0); | |
92 } | |
93 | |
94 // At top of Java expression stack which may be different than esp(). It | |
95 // isn't for category 1 objects. | |
96 static inline Address at_tos () { | |
97 return Address(rsp, Interpreter::expr_offset_in_bytes(0)); | |
98 } | |
99 | |
100 static inline Address at_tos_p1() { | |
101 return Address(rsp, Interpreter::expr_offset_in_bytes(1)); | |
102 } | |
103 | |
104 static inline Address at_tos_p2() { | |
105 return Address(rsp, Interpreter::expr_offset_in_bytes(2)); | |
106 } | |
107 | |
108 static inline Address at_tos_p3() { | |
109 return Address(rsp, Interpreter::expr_offset_in_bytes(3)); | |
110 } | |
111 | |
112 // Condition conversion | |
113 static Assembler::Condition j_not(TemplateTable::Condition cc) { | |
114 switch (cc) { | |
115 case TemplateTable::equal : return Assembler::notEqual; | |
116 case TemplateTable::not_equal : return Assembler::equal; | |
117 case TemplateTable::less : return Assembler::greaterEqual; | |
118 case TemplateTable::less_equal : return Assembler::greater; | |
119 case TemplateTable::greater : return Assembler::lessEqual; | |
120 case TemplateTable::greater_equal: return Assembler::less; | |
121 } | |
122 ShouldNotReachHere(); | |
123 return Assembler::zero; | |
124 } | |
125 | |
126 | |
127 // Miscelaneous helper routines | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
128 // Store an oop (or NULL) at the address described by obj. |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
129 // If val == noreg this means store a NULL |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
130 |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
131 static void do_oop_store(InterpreterMacroAssembler* _masm, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
132 Address obj, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
133 Register val, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
134 BarrierSet::Name barrier, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
135 bool precise) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
136 assert(val == noreg || val == rax, "parameter is just for looks"); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
137 switch (barrier) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
138 #ifndef SERIALGC |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
139 case BarrierSet::G1SATBCT: |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
140 case BarrierSet::G1SATBCTLogging: |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
141 { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
142 // flatten object address if needed |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
143 if (obj.index() == noreg && obj.disp() == 0) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
144 if (obj.base() != rdx) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
145 __ movq(rdx, obj.base()); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
146 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
147 } else { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
148 __ leaq(rdx, obj); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
149 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
150 __ g1_write_barrier_pre(rdx, r8, rbx, val != noreg); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
151 if (val == noreg) { |
1047
beb8f45ee9f0
6889740: G1: OpenDS fails with "unhandled exception in compiled code"
johnc
parents:
844
diff
changeset
|
152 __ store_heap_oop_null(Address(rdx, 0)); |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
153 } else { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
154 __ store_heap_oop(Address(rdx, 0), val); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
155 __ g1_write_barrier_post(rdx, val, r8, rbx); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
156 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
157 |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
158 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
159 break; |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
160 #endif // SERIALGC |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
161 case BarrierSet::CardTableModRef: |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
162 case BarrierSet::CardTableExtension: |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
163 { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
164 if (val == noreg) { |
1047
beb8f45ee9f0
6889740: G1: OpenDS fails with "unhandled exception in compiled code"
johnc
parents:
844
diff
changeset
|
165 __ store_heap_oop_null(obj); |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
166 } else { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
167 __ store_heap_oop(obj, val); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
168 // flatten object address if needed |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
169 if (!precise || (obj.index() == noreg && obj.disp() == 0)) { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
170 __ store_check(obj.base()); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
171 } else { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
172 __ leaq(rdx, obj); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
173 __ store_check(rdx); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
174 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
175 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
176 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
177 break; |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
178 case BarrierSet::ModRef: |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
179 case BarrierSet::Other: |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
180 if (val == noreg) { |
1047
beb8f45ee9f0
6889740: G1: OpenDS fails with "unhandled exception in compiled code"
johnc
parents:
844
diff
changeset
|
181 __ store_heap_oop_null(obj); |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
182 } else { |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
183 __ store_heap_oop(obj, val); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
184 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
185 break; |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
186 default : |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
187 ShouldNotReachHere(); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
188 |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
189 } |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
190 } |
0 | 191 |
192 Address TemplateTable::at_bcp(int offset) { | |
193 assert(_desc->uses_bcp(), "inconsistent uses_bcp information"); | |
194 return Address(r13, offset); | |
195 } | |
196 | |
197 void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc, | |
198 Register scratch, | |
199 bool load_bc_into_scratch/*=true*/) { | |
200 if (!RewriteBytecodes) { | |
201 return; | |
202 } | |
203 // the pair bytecodes have already done the load. | |
204 if (load_bc_into_scratch) { | |
205 __ movl(bc, bytecode); | |
206 } | |
207 Label patch_done; | |
208 if (JvmtiExport::can_post_breakpoint()) { | |
209 Label fast_patch; | |
210 // if a breakpoint is present we can't rewrite the stream directly | |
211 __ movzbl(scratch, at_bcp(0)); | |
212 __ cmpl(scratch, Bytecodes::_breakpoint); | |
213 __ jcc(Assembler::notEqual, fast_patch); | |
214 __ get_method(scratch); | |
215 // Let breakpoint table handling rewrite to quicker bytecode | |
1108 | 216 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc); |
0 | 217 #ifndef ASSERT |
218 __ jmpb(patch_done); | |
1108 | 219 #else |
220 __ jmp(patch_done); | |
221 #endif | |
0 | 222 __ bind(fast_patch); |
223 } | |
1108 | 224 #ifdef ASSERT |
0 | 225 Label okay; |
226 __ load_unsigned_byte(scratch, at_bcp(0)); | |
227 __ cmpl(scratch, (int) Bytecodes::java_code(bytecode)); | |
228 __ jcc(Assembler::equal, okay); | |
229 __ cmpl(scratch, bc); | |
230 __ jcc(Assembler::equal, okay); | |
231 __ stop("patching the wrong bytecode"); | |
232 __ bind(okay); | |
233 #endif | |
234 // patch bytecode | |
235 __ movb(at_bcp(0), bc); | |
236 __ bind(patch_done); | |
237 } | |
238 | |
239 | |
240 // Individual instructions | |
241 | |
242 void TemplateTable::nop() { | |
243 transition(vtos, vtos); | |
244 // nothing to do | |
245 } | |
246 | |
247 void TemplateTable::shouldnotreachhere() { | |
248 transition(vtos, vtos); | |
249 __ stop("shouldnotreachhere bytecode"); | |
250 } | |
251 | |
252 void TemplateTable::aconst_null() { | |
253 transition(vtos, atos); | |
254 __ xorl(rax, rax); | |
255 } | |
256 | |
257 void TemplateTable::iconst(int value) { | |
258 transition(vtos, itos); | |
259 if (value == 0) { | |
260 __ xorl(rax, rax); | |
261 } else { | |
262 __ movl(rax, value); | |
263 } | |
264 } | |
265 | |
266 void TemplateTable::lconst(int value) { | |
267 transition(vtos, ltos); | |
268 if (value == 0) { | |
269 __ xorl(rax, rax); | |
270 } else { | |
271 __ movl(rax, value); | |
272 } | |
273 } | |
274 | |
275 void TemplateTable::fconst(int value) { | |
276 transition(vtos, ftos); | |
277 static float one = 1.0f, two = 2.0f; | |
278 switch (value) { | |
279 case 0: | |
280 __ xorps(xmm0, xmm0); | |
281 break; | |
282 case 1: | |
283 __ movflt(xmm0, ExternalAddress((address) &one)); | |
284 break; | |
285 case 2: | |
286 __ movflt(xmm0, ExternalAddress((address) &two)); | |
287 break; | |
288 default: | |
289 ShouldNotReachHere(); | |
290 break; | |
291 } | |
292 } | |
293 | |
294 void TemplateTable::dconst(int value) { | |
295 transition(vtos, dtos); | |
296 static double one = 1.0; | |
297 switch (value) { | |
298 case 0: | |
299 __ xorpd(xmm0, xmm0); | |
300 break; | |
301 case 1: | |
302 __ movdbl(xmm0, ExternalAddress((address) &one)); | |
303 break; | |
304 default: | |
305 ShouldNotReachHere(); | |
306 break; | |
307 } | |
308 } | |
309 | |
310 void TemplateTable::bipush() { | |
311 transition(vtos, itos); | |
312 __ load_signed_byte(rax, at_bcp(1)); | |
313 } | |
314 | |
315 void TemplateTable::sipush() { | |
316 transition(vtos, itos); | |
622
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
605
diff
changeset
|
317 __ load_unsigned_short(rax, at_bcp(1)); |
0 | 318 __ bswapl(rax); |
319 __ sarl(rax, 16); | |
320 } | |
321 | |
322 void TemplateTable::ldc(bool wide) { | |
323 transition(vtos, vtos); | |
324 Label call_ldc, notFloat, notClass, Done; | |
325 | |
326 if (wide) { | |
327 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); | |
328 } else { | |
329 __ load_unsigned_byte(rbx, at_bcp(1)); | |
330 } | |
331 | |
332 __ get_cpool_and_tags(rcx, rax); | |
333 const int base_offset = constantPoolOopDesc::header_size() * wordSize; | |
334 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; | |
335 | |
336 // get type | |
337 __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset)); | |
338 | |
339 // unresolved string - get the resolved string | |
340 __ cmpl(rdx, JVM_CONSTANT_UnresolvedString); | |
341 __ jccb(Assembler::equal, call_ldc); | |
342 | |
343 // unresolved class - get the resolved class | |
344 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass); | |
345 __ jccb(Assembler::equal, call_ldc); | |
346 | |
347 // unresolved class in error state - call into runtime to throw the error | |
348 // from the first resolution attempt | |
349 __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError); | |
350 __ jccb(Assembler::equal, call_ldc); | |
351 | |
352 // resolved class - need to call vm to get java mirror of the class | |
353 __ cmpl(rdx, JVM_CONSTANT_Class); | |
354 __ jcc(Assembler::notEqual, notClass); | |
355 | |
356 __ bind(call_ldc); | |
357 __ movl(c_rarg1, wide); | |
358 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1); | |
359 __ push_ptr(rax); | |
360 __ verify_oop(rax); | |
361 __ jmp(Done); | |
362 | |
363 __ bind(notClass); | |
364 __ cmpl(rdx, JVM_CONSTANT_Float); | |
365 __ jccb(Assembler::notEqual, notFloat); | |
366 // ftos | |
367 __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset)); | |
368 __ push_f(); | |
369 __ jmp(Done); | |
370 | |
371 __ bind(notFloat); | |
372 #ifdef ASSERT | |
373 { | |
374 Label L; | |
375 __ cmpl(rdx, JVM_CONSTANT_Integer); | |
376 __ jcc(Assembler::equal, L); | |
377 __ cmpl(rdx, JVM_CONSTANT_String); | |
378 __ jcc(Assembler::equal, L); | |
379 __ stop("unexpected tag type in ldc"); | |
380 __ bind(L); | |
381 } | |
382 #endif | |
383 // atos and itos | |
384 Label isOop; | |
385 __ cmpl(rdx, JVM_CONSTANT_Integer); | |
386 __ jcc(Assembler::notEqual, isOop); | |
387 __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset)); | |
388 __ push_i(rax); | |
389 __ jmp(Done); | |
390 | |
391 __ bind(isOop); | |
304 | 392 __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset)); |
0 | 393 __ push_ptr(rax); |
394 | |
395 if (VerifyOops) { | |
396 __ verify_oop(rax); | |
397 } | |
398 | |
399 __ bind(Done); | |
400 } | |
401 | |
1602 | 402 // Fast path for caching oop constants. |
403 // %%% We should use this to handle Class and String constants also. | |
404 // %%% It will simplify the ldc/primitive path considerably. | |
405 void TemplateTable::fast_aldc(bool wide) { | |
406 transition(vtos, atos); | |
407 | |
408 if (!EnableMethodHandles) { | |
409 // We should not encounter this bytecode if !EnableMethodHandles. | |
410 // The verifier will stop it. However, if we get past the verifier, | |
411 // this will stop the thread in a reasonable way, without crashing the JVM. | |
412 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
413 InterpreterRuntime::throw_IncompatibleClassChangeError)); | |
414 // the call_VM checks for exception, so we should never return here. | |
415 __ should_not_reach_here(); | |
416 return; | |
417 } | |
418 | |
419 const Register cache = rcx; | |
420 const Register index = rdx; | |
421 | |
422 resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); | |
423 if (VerifyOops) { | |
424 __ verify_oop(rax); | |
425 } | |
1913
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
426 |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
427 Label L_done, L_throw_exception; |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
428 const Register con_klass_temp = rcx; // same as cache |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
429 const Register array_klass_temp = rdx; // same as index |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
430 __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes())); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
431 __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr())); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
432 __ cmpptr(con_klass_temp, Address(array_klass_temp, 0)); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
433 __ jcc(Assembler::notEqual, L_done); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
434 __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
435 __ jcc(Assembler::notEqual, L_throw_exception); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
436 __ xorptr(rax, rax); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
437 __ jmp(L_done); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
438 |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
439 // Load the exception from the system-array which wraps it: |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
440 __ bind(L_throw_exception); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
441 __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
442 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
443 |
3b2dea75431e
6984311: JSR 292 needs optional bootstrap method parameters
jrose
parents:
1846
diff
changeset
|
444 __ bind(L_done); |
1602 | 445 } |
446 | |
0 | 447 void TemplateTable::ldc2_w() { |
448 transition(vtos, vtos); | |
449 Label Long, Done; | |
450 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); | |
451 | |
452 __ get_cpool_and_tags(rcx, rax); | |
453 const int base_offset = constantPoolOopDesc::header_size() * wordSize; | |
454 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; | |
455 | |
456 // get type | |
457 __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), | |
458 JVM_CONSTANT_Double); | |
459 __ jccb(Assembler::notEqual, Long); | |
460 // dtos | |
461 __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset)); | |
462 __ push_d(); | |
463 __ jmpb(Done); | |
464 | |
465 __ bind(Long); | |
466 // ltos | |
467 __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset)); | |
468 __ push_l(); | |
469 | |
470 __ bind(Done); | |
471 } | |
472 | |
473 void TemplateTable::locals_index(Register reg, int offset) { | |
474 __ load_unsigned_byte(reg, at_bcp(offset)); | |
304 | 475 __ negptr(reg); |
0 | 476 } |
477 | |
478 void TemplateTable::iload() { | |
479 transition(vtos, itos); | |
480 if (RewriteFrequentPairs) { | |
481 Label rewrite, done; | |
482 const Register bc = c_rarg3; | |
483 assert(rbx != bc, "register damaged"); | |
484 | |
485 // get next byte | |
486 __ load_unsigned_byte(rbx, | |
487 at_bcp(Bytecodes::length_for(Bytecodes::_iload))); | |
488 // if _iload, wait to rewrite to iload2. We only want to rewrite the | |
489 // last two iloads in a pair. Comparing against fast_iload means that | |
490 // the next bytecode is neither an iload or a caload, and therefore | |
491 // an iload pair. | |
492 __ cmpl(rbx, Bytecodes::_iload); | |
493 __ jcc(Assembler::equal, done); | |
494 | |
495 __ cmpl(rbx, Bytecodes::_fast_iload); | |
496 __ movl(bc, Bytecodes::_fast_iload2); | |
497 __ jccb(Assembler::equal, rewrite); | |
498 | |
499 // if _caload, rewrite to fast_icaload | |
500 __ cmpl(rbx, Bytecodes::_caload); | |
501 __ movl(bc, Bytecodes::_fast_icaload); | |
502 __ jccb(Assembler::equal, rewrite); | |
503 | |
504 // rewrite so iload doesn't check again. | |
505 __ movl(bc, Bytecodes::_fast_iload); | |
506 | |
507 // rewrite | |
508 // bc: fast bytecode | |
509 __ bind(rewrite); | |
510 patch_bytecode(Bytecodes::_iload, bc, rbx, false); | |
511 __ bind(done); | |
512 } | |
513 | |
514 // Get the local value into tos | |
515 locals_index(rbx); | |
516 __ movl(rax, iaddress(rbx)); | |
517 } | |
518 | |
519 void TemplateTable::fast_iload2() { | |
520 transition(vtos, itos); | |
521 locals_index(rbx); | |
522 __ movl(rax, iaddress(rbx)); | |
523 __ push(itos); | |
524 locals_index(rbx, 3); | |
525 __ movl(rax, iaddress(rbx)); | |
526 } | |
527 | |
528 void TemplateTable::fast_iload() { | |
529 transition(vtos, itos); | |
530 locals_index(rbx); | |
531 __ movl(rax, iaddress(rbx)); | |
532 } | |
533 | |
534 void TemplateTable::lload() { | |
535 transition(vtos, ltos); | |
536 locals_index(rbx); | |
537 __ movq(rax, laddress(rbx)); | |
538 } | |
539 | |
540 void TemplateTable::fload() { | |
541 transition(vtos, ftos); | |
542 locals_index(rbx); | |
543 __ movflt(xmm0, faddress(rbx)); | |
544 } | |
545 | |
546 void TemplateTable::dload() { | |
547 transition(vtos, dtos); | |
548 locals_index(rbx); | |
549 __ movdbl(xmm0, daddress(rbx)); | |
550 } | |
551 | |
552 void TemplateTable::aload() { | |
553 transition(vtos, atos); | |
554 locals_index(rbx); | |
304 | 555 __ movptr(rax, aaddress(rbx)); |
0 | 556 } |
557 | |
558 void TemplateTable::locals_index_wide(Register reg) { | |
559 __ movl(reg, at_bcp(2)); | |
560 __ bswapl(reg); | |
561 __ shrl(reg, 16); | |
304 | 562 __ negptr(reg); |
0 | 563 } |
564 | |
565 void TemplateTable::wide_iload() { | |
566 transition(vtos, itos); | |
567 locals_index_wide(rbx); | |
568 __ movl(rax, iaddress(rbx)); | |
569 } | |
570 | |
571 void TemplateTable::wide_lload() { | |
572 transition(vtos, ltos); | |
573 locals_index_wide(rbx); | |
574 __ movq(rax, laddress(rbx)); | |
575 } | |
576 | |
577 void TemplateTable::wide_fload() { | |
578 transition(vtos, ftos); | |
579 locals_index_wide(rbx); | |
580 __ movflt(xmm0, faddress(rbx)); | |
581 } | |
582 | |
583 void TemplateTable::wide_dload() { | |
584 transition(vtos, dtos); | |
585 locals_index_wide(rbx); | |
586 __ movdbl(xmm0, daddress(rbx)); | |
587 } | |
588 | |
589 void TemplateTable::wide_aload() { | |
590 transition(vtos, atos); | |
591 locals_index_wide(rbx); | |
304 | 592 __ movptr(rax, aaddress(rbx)); |
0 | 593 } |
594 | |
595 void TemplateTable::index_check(Register array, Register index) { | |
596 // destroys rbx | |
597 // check array | |
598 __ null_check(array, arrayOopDesc::length_offset_in_bytes()); | |
599 // sign extend index for use by indexed load | |
304 | 600 __ movl2ptr(index, index); |
0 | 601 // check index |
602 __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes())); | |
603 if (index != rbx) { | |
604 // ??? convention: move aberrant index into ebx for exception message | |
605 assert(rbx != array, "different registers"); | |
606 __ movl(rbx, index); | |
607 } | |
608 __ jump_cc(Assembler::aboveEqual, | |
609 ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry)); | |
610 } | |
611 | |
612 void TemplateTable::iaload() { | |
613 transition(itos, itos); | |
614 __ pop_ptr(rdx); | |
615 // eax: index | |
616 // rdx: array | |
617 index_check(rdx, rax); // kills rbx | |
618 __ movl(rax, Address(rdx, rax, | |
619 Address::times_4, | |
620 arrayOopDesc::base_offset_in_bytes(T_INT))); | |
621 } | |
622 | |
623 void TemplateTable::laload() { | |
624 transition(itos, ltos); | |
625 __ pop_ptr(rdx); | |
626 // eax: index | |
627 // rdx: array | |
628 index_check(rdx, rax); // kills rbx | |
629 __ movq(rax, Address(rdx, rbx, | |
630 Address::times_8, | |
631 arrayOopDesc::base_offset_in_bytes(T_LONG))); | |
632 } | |
633 | |
634 void TemplateTable::faload() { | |
635 transition(itos, ftos); | |
636 __ pop_ptr(rdx); | |
637 // eax: index | |
638 // rdx: array | |
639 index_check(rdx, rax); // kills rbx | |
640 __ movflt(xmm0, Address(rdx, rax, | |
641 Address::times_4, | |
642 arrayOopDesc::base_offset_in_bytes(T_FLOAT))); | |
643 } | |
644 | |
645 void TemplateTable::daload() { | |
646 transition(itos, dtos); | |
647 __ pop_ptr(rdx); | |
648 // eax: index | |
649 // rdx: array | |
650 index_check(rdx, rax); // kills rbx | |
651 __ movdbl(xmm0, Address(rdx, rax, | |
652 Address::times_8, | |
653 arrayOopDesc::base_offset_in_bytes(T_DOUBLE))); | |
654 } | |
655 | |
656 void TemplateTable::aaload() { | |
657 transition(itos, atos); | |
658 __ pop_ptr(rdx); | |
659 // eax: index | |
660 // rdx: array | |
661 index_check(rdx, rax); // kills rbx | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
662 __ load_heap_oop(rax, Address(rdx, rax, |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
663 UseCompressedOops ? Address::times_4 : Address::times_8, |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
664 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); |
0 | 665 } |
666 | |
667 void TemplateTable::baload() { | |
668 transition(itos, itos); | |
669 __ pop_ptr(rdx); | |
670 // eax: index | |
671 // rdx: array | |
672 index_check(rdx, rax); // kills rbx | |
673 __ load_signed_byte(rax, | |
674 Address(rdx, rax, | |
675 Address::times_1, | |
676 arrayOopDesc::base_offset_in_bytes(T_BYTE))); | |
677 } | |
678 | |
// caload: load a char from an array; chars are unsigned 16-bit, so the
// result is zero-extended to int.
void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx; throws if index out of bounds
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}
690 | |
// iload followed by caload frequent pair
// Fused template installed by bytecode rewriting: fetches the int local
// itself instead of taking it from the expression stack.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax); // kills rbx; throws if index out of bounds
  // Same element load as caload: unsigned 16-bit char, zero-extended.
  __ load_unsigned_short(rax,
                         Address(rdx, rax,
                                 Address::times_2,
                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}
707 | |
// saload: load a short from an array; shorts are signed 16-bit, so the
// result is sign-extended to int (contrast with caload's zero-extend).
void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx; throws if index out of bounds
  __ load_signed_short(rax,
                       Address(rdx, rax,
                               Address::times_2,
                               arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}
719 | |
// iload_<n>: load an int from local slot n into the tos cache (rax).
void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

// lload_<n>: load a long from local slot n (full 64-bit move on x86_64).
void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
}

// fload_<n>: load a float from local slot n into xmm0 (ftos cache).
void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
}

// dload_<n>: load a double from local slot n into xmm0 (dtos cache).
void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
}

// aload_<n>: load an object reference from local slot n.
void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}
744 | |
// aload_0 with optional rewriting of frequent bytecode pairs.
void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite (the pair bytecode is chosen
    // only once _getfield itself has been rewritten to a fast variant)
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}
823 | |
// istore: pop an int (cached in rax) into the local slot named by the
// bytecode's index operand.
void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx); // rbx <- local slot index from bcp
  __ movl(iaddress(rbx), rax);
}

// lstore: pop a long into a local slot pair (single 64-bit store here).
void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
}

// fstore: pop a float (cached in xmm0) into a local slot.
void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
}

// dstore: pop a double (cached in xmm0) into a local slot pair.
void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
}
847 | |
// astore: pop a reference (or a jsr return address, hence vtos in) into
// a local slot.
void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}
854 | |
// wide_istore: istore with a 16-bit local index (wide prefix form).
void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx); // rbx <- 16-bit local slot index
  __ movl(iaddress(rbx), rax);
}

// wide_lstore: lstore with a 16-bit local index.
void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
}

// wide_fstore: fstore with a 16-bit local index.
void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
}

// wide_dstore: dstore with a 16-bit local index.
void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
}

// wide_astore: astore with a 16-bit local index.
void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}
889 | |
// iastore: store an int into an array; stack is ..., array, index, value.
void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}
903 | |
// lastore: store a long into an array; stack is ..., array, index, value.
void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}
917 | |
// fastore: store a float into an array; stack is ..., array, index, value.
void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}
931 | |
// dastore: store a double into an array; stack is ..., array, index, value.
void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}
945 | |
// aastore: store a reference into an object array. Performs the dynamic
// array store check (ArrayStoreException) for non-null values and routes
// the store through the GC barrier (do_oop_store) so card-marking /
// G1-style barriers are applied. Operands are read off the expression
// stack in place (vtos) and only popped at the very end, so they remain
// visible to the GC across any runtime call taken from here.
void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1()); // index
  __ movptr(rdx, at_tos_p2()); // array

  // Element address; scaling depends on oop compression.
  Address element_address(rdx, rcx,
                          UseCompressedOops? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx);     // kills rbx
  // do array store check - check for NULL value first
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  // The array's element klass is the supertype the value must conform to.
  __ movptr(rax, Address(rax,
                         sizeof(oopDesc) +
                         objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array + index*oopSize + 12 into a single register.  Frees rcx.
  __ lea(rdx, element_address);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store (re-read: rax was clobbered above)
  __ movptr(rax, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
  // (a null store always passes the type check, so skip it)
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}
1001 | |
// bastore: store a byte/boolean into an array; only the low 8 bits of
// the int value are written.
void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}
1015 | |
// castore: store a char into an array; only the low 16 bits are written.
void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

// sastore: identical machine code to castore — both write the low
// 16 bits of the int value (base offsets for T_CHAR/T_SHORT match).
void TemplateTable::sastore() {
  castore();
}
1033 | |
// istore_<n>: store the cached int tos (rax) into local slot n.
void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

// lstore_<n>: store the cached long tos into local slot n.
void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
}

// fstore_<n>: store the cached float tos (xmm0) into local slot n.
void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
}

// dstore_<n>: store the cached double tos (xmm0) into local slot n.
void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
}

// astore_<n>: pop a reference (or jsr return address) into local slot n.
void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}
1059 | |
// pop: discard the top expression-stack slot by bumping rsp.
void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

// pop2: discard the top two expression-stack slots.
void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}
1069 | |
// dup: duplicate the top stack slot.
void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}
1076 | |
// dup_x1: duplicate the top slot and insert it beneath the next one.
void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}
1087 | |
// dup_x2: duplicate the top slot and insert it three slots down.
void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}
1102 | |
// dup2: duplicate the top two stack slots (one long/double or two
// category-1 values).
void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}
1112 | |
// dup2_x1: duplicate the top two slots and insert them beneath the third.
void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}
1129 | |
// dup2_x2: duplicate the top two slots and insert them four slots down.
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}
1148 | |
// swap: exchange the top two stack slots.
void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}
1158 | |
// iop2: binary int operations. Second operand is the cached tos (rax);
// the first is popped. Non-commutative ops (sub, shifts) swap operands
// into place first; shifts need the count in cl (rcx) on x86.
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  : __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  : __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  : __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break;
  default   : ShouldNotReachHere();
  }
}
1174 | |
// lop2: binary long operations (64-bit ptr-width ops on x86_64).
// mul/div/rem and shifts have their own dedicated templates.
void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}
1186 | |
// idiv: int division. corrected_idivl handles the min_int / -1 overflow
// case (and the implicit div-by-zero trap for ArithmeticException).
void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax); // divisor to rcx
  __ pop_i(rax);     // dividend to rax (idiv's fixed register)
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

// irem: int remainder; same division, result taken from rdx (edx holds
// the remainder after x86 idiv).
void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}
1209 | |
// lmul: long multiplication (commutative, no operand swap needed).
void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}
1215 | |
// ldiv: long division with an explicit divide-by-zero check (unlike
// idiv, which relies on corrected_idivl's handling).
void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ mov(rcx, rax);  // divisor to rcx
  __ pop_l(rax);     // dividend to rax
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
}

// lrem: long remainder; division as in ldiv, result taken from rdx.
void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
}
1245 | |
// lshl: long shift left; int shift count on tos, long value beneath.
// x86 shift instructions take the count in cl; the hardware masks it.
void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shlq(rax);
}

// lshr: arithmetic (sign-propagating) long shift right.
void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ sarq(rax);
}

// lushr: logical (zero-filling) long shift right.
void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shrq(rax);
}
1266 | |
// fop2: binary float operations on xmm0 (tos) and the value on the
// expression stack. Commutative ops use a memory operand directly and
// just pop the slot; non-commutative ops reload to get operand order
// right. frem goes to the runtime (no SSE remainder instruction).
void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addptr(rsp, Interpreter::stackElementSize);
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    // Arguments in xmm0/xmm1 per the leaf-call float ABI.
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
1298 | |
// dop2: binary double operations; same structure as fop2 but doubles
// occupy two stack slots, hence the 2 * stackElementSize pops.
void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addptr(rsp, 2 * Interpreter::stackElementSize);
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    // Arguments in xmm0/xmm1 per the leaf-call double ABI.
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
1330 | |
// ineg: negate the int tos in place.
void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

// lneg: negate the long tos in place.
void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}
1340 | |
1341 // Note: 'double' and 'long long' have 32-bits alignment on x86. | |
1342 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) { | |
1343 // Use the expression (adr)&(~0xF) to provide 128-bits aligned address | |
1344 // of 128-bits operands for SSE instructions. | |
1345 jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF))); | |
1346 // Store the value to a 128-bits operand. | |
1347 operand[0] = lo; | |
1348 operand[1] = hi; | |
1349 return operand; | |
1350 } | |
1351 | |
// Buffer for 128-bits masks used by SSE instructions.
// Sized 2*2 so double_quadword can align down within them.
static jlong float_signflip_pool[2*2];
static jlong double_signflip_pool[2*2];

// fneg: flip the sign bit of the float tos by XOR-ing with a mask that
// has the sign bit set in each 32-bit lane.
void TemplateTable::fneg() {
  transition(ftos, ftos);
  static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000);
  __ xorps(xmm0, ExternalAddress((address) float_signflip));
}

// dneg: flip the sign bit of the double tos; mask has the sign bit set
// in each 64-bit lane.
void TemplateTable::dneg() {
  transition(dtos, dtos);
  static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000);
  __ xorpd(xmm0, ExternalAddress((address) double_signflip));
}
1367 | |
// iinc: add the bytecode's signed 8-bit immediate to a local int.
void TemplateTable::iinc() {
  transition(vtos, vtos);
  __ load_signed_byte(rdx, at_bcp(2)); // get constant
  locals_index(rbx);
  __ addl(iaddress(rbx), rdx);
}
1374 | |
// wide_iinc: iinc with a 16-bit index and 16-bit signed constant.
void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  __ movl(rdx, at_bcp(4)); // get constant
  locals_index_wide(rbx);
  // The constant is stored big-endian in the bytecode stream:
  __ bswapl(rdx); // swap bytes & sign-extend constant
  __ sarl(rdx, 16);
  __ addl(iaddress(rbx), rdx);
  // Note: should probably use only one movl to get both
  //       the index and the constant -> fix this
}
1385 | |
// convert: all primitive conversion bytecodes (i2l, f2d, d2i, ...).
// SSE cvttss2si/cvttsd2si produce the "integer indefinite" value
// (0x80000000 / min_jlong) for NaN and out-of-range inputs; those cases
// are fixed up by a runtime call to match Java semantics.
void TemplateTable::convert() {
  // Checking
#ifdef ASSERT
  {
    // Verify that this bytecode's declared tos transition matches the
    // conversion being generated.
    TosState tos_in  = ilgl;
    TosState tos_out = ilgl;
    switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
    }
    switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
    }
    transition(tos_in, tos_out);
  }
#endif // ASSERT

  // min_jlong: the value cvttss2siq/cvttsd2siq yield for NaN/overflow.
  static const int64_t is_nan = 0x8000000000000000L;

  // Conversion
  switch (bytecode()) {
  case Bytecodes::_i2l:
    __ movslq(rax, rax);
    break;
  case Bytecodes::_i2f:
    __ cvtsi2ssl(xmm0, rax);
    break;
  case Bytecodes::_i2d:
    __ cvtsi2sdl(xmm0, rax);
    break;
  case Bytecodes::_i2b:
    __ movsbl(rax, rax);
    break;
  case Bytecodes::_i2c:
    __ movzwl(rax, rax);
    break;
  case Bytecodes::_i2s:
    __ movswl(rax, rax);
    break;
  case Bytecodes::_l2i:
    __ movl(rax, rax); // zero-clears the upper half
    break;
  case Bytecodes::_l2f:
    __ cvtsi2ssq(xmm0, rax);
    break;
  case Bytecodes::_l2d:
    __ cvtsi2sdq(xmm0, rax);
    break;
  case Bytecodes::_f2i:
  {
    Label L;
    __ cvttss2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2l:
  {
    Label L;
    __ cvttss2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_f2d:
    __ cvtss2sd(xmm0, xmm0);
    break;
  case Bytecodes::_d2i:
  {
    Label L;
    __ cvttsd2sil(rax, xmm0);
    __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2l:
  {
    Label L;
    __ cvttsd2siq(rax, xmm0);
    // NaN or overflow/underflow?
    __ cmp64(rax, ExternalAddress((address) &is_nan));
    __ jcc(Assembler::notEqual, L);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ bind(L);
  }
    break;
  case Bytecodes::_d2f:
    __ cvtsd2ss(xmm0, xmm0);
    break;
  default:
    ShouldNotReachHere();
  }
}
1515 | |
// lcmp: compare two longs; push -1, 0, or 1.
void TemplateTable::lcmp() {
  transition(ltos, itos);
  Label done;
  __ pop_l(rdx);
  __ cmpq(rdx, rax);
  __ movl(rax, -1);
  __ jccb(Assembler::less, done);
  // setb produces 0 for equal, 1 for greater; widen to 32 bits.
  __ setb(Assembler::notEqual, rax);
  __ movzbl(rax, rax);
  __ bind(done);
}
1527 | |
// float_cmp: fcmpl/fcmpg/dcmpl/dcmpg. Pushes -1, 0, or 1; the sign of
// unordered_result selects the value produced when either operand is
// NaN (ucomiss/ucomisd set the parity flag on an unordered compare).
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  Label done;
  if (is_float) {
    // XXX get rid of pop here, use ... reg, mem32
    __ pop_f(xmm1);
    __ ucomiss(xmm1, xmm0);
  } else {
    // XXX get rid of pop here, use ... reg, mem64
    __ pop_d(xmm1);
    __ ucomisd(xmm1, xmm0);
  }
  if (unordered_result < 0) {
    // NaN => -1; otherwise -1 / 0 / 1 by flags.
    __ movl(rax, -1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::below, done);
    __ setb(Assembler::notEqual, rdx);
    __ movzbl(rax, rdx);
  } else {
    // NaN => 1; otherwise 1 / 0 / -1 by flags.
    __ movl(rax, 1);
    __ jccb(Assembler::parity, done);
    __ jccb(Assembler::above, done);
    __ movl(rax, 0);
    __ jccb(Assembler::equal, done);
    __ decrementl(rax);
  }
  __ bind(done);
}
1555 | |
void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Common code for all branch bytecodes (goto/goto_w, jsr/jsr_w and the
  // conditional if_* templates): computes the target bcp, profiles the
  // taken branch, and for backward branches bumps the backedge counter,
  // which may trigger method-data allocation, compilation, or on-stack
  // replacement (OSR).
  __ get_method(rcx); // rcx holds method
  __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
                                     // holds bumped taken count

  const ByteSize be_offset = methodOopDesc::backedge_counter_offset() +
                             InvocationCounter::counter_offset();
  const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() +
                              InvocationCounter::counter_offset();
  const int method_offset = frame::interpreter_frame_method_offset * wordSize;

  // Load up edx with the branch displacement
  __ movl(rdx, at_bcp(1));
  __ bswapl(rdx);

  if (!is_wide) {
    // narrow branch: the 16-bit displacement sits in the upper half after
    // the byte swap; this shift also sign-extends it
    __ sarl(rdx, 16);
  }
  __ movl2ptr(rdx, rdx); // sign-extend displacement to pointer width

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the non-JSR
  // normal-branch stuff occurring below.
  if (is_jsr) {
    // Pre-load the next target bytecode into rbx
    __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));

    // compute return address as bci in rax
    __ lea(rax, at_bcp((is_wide ? 5 : 3) -
                       in_bytes(constMethodOopDesc::codes_offset())));
    __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
    // Adjust the bcp in r13 by the displacement in rdx
    __ addptr(r13, rdx);
    // jsr returns atos that is not an oop
    __ push_i(rax);
    __ dispatch_only(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Adjust the bcp in r13 by the displacement in rdx
  __ addptr(r13, rdx);

  assert(UseLoopCounter || !UseOnStackReplacement,
         "on-stack-replacement requires loop counters");
  Label backedge_counter_overflow;
  Label profile_method;
  Label dispatch;
  if (UseLoopCounter) {
    // increment backedge counter for backward branches
    // rax: MDO
    // ebx: MDO bumped taken-count
    // rcx: method
    // rdx: target offset
    // r13: target bcp
    // r14: locals pointer
    __ testl(rdx, rdx);                    // check if forward or backward branch
    __ jcc(Assembler::positive, dispatch); // count only if backward branch
    if (TieredCompilation) {
      Label no_mdo;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // Are we profiling?
        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
        __ testptr(rbx, rbx);
        __ jccb(Assembler::zero, no_mdo);
        // Increment the MDO backedge counter
        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
                                           in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                   rax, false, Assembler::zero, &backedge_counter_overflow);
        __ jmp(dispatch);
      }
      __ bind(no_mdo);
      // Increment backedge counter in methodOop
      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
                                 rax, false, Assembler::zero, &backedge_counter_overflow);
    } else {
      // increment counter
      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
      __ movl(Address(rcx, be_offset), rax);        // store counter

      __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
      __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
      __ addl(rax, Address(rcx, be_offset));        // add both counters

      if (ProfileInterpreter) {
        // Test to see if we should create a method data oop
        __ cmp32(rax,
                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
        __ jcc(Assembler::less, dispatch);

        // if no method data exists, go to profile method
        __ test_method_data_pointer(rax, profile_method);

        if (UseOnStackReplacement) {
          // check for overflow against ebx which is the MDO taken count
          __ cmp32(rbx,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::below, dispatch);

          // When ProfileInterpreter is on, the backedge_count comes
          // from the methodDataOop, which value does not get reset on
          // the call to frequency_counter_overflow(). To avoid
          // excessive calls to the overflow routine while the method is
          // being compiled, add a second test to make sure the overflow
          // function is called only once every overflow_frequency.
          const int overflow_frequency = 1024;
          __ andl(rbx, overflow_frequency - 1);
          __ jcc(Assembler::zero, backedge_counter_overflow);

        }
      } else {
        if (UseOnStackReplacement) {
          // check for overflow against eax, which is the sum of the
          // counters
          __ cmp32(rax,
                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);

        }
      }
    }
    __ bind(dispatch);
  }

  // Pre-load the next target bytecode into rbx
  __ load_unsigned_byte(rbx, Address(r13, 0));

  // continue with the bytecode @ target
  // eax: return bci for jsr's, unused otherwise
  // ebx: target bytecode
  // r13: target bcp
  __ dispatch_only(vtos);

  if (UseLoopCounter) {
    if (ProfileInterpreter) {
      // Out-of-line code to allocate method data oop.
      __ bind(profile_method);
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::profile_method), r13);
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode
      __ movptr(rcx, Address(rbp, method_offset));
      __ movptr(rcx, Address(rcx,
                             in_bytes(methodOopDesc::method_data_offset())));
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
                rcx);
      __ test_method_data_pointer(rcx, dispatch);
      // offset non-null mdp by MDO::data_offset() + IR::profile_method()
      __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
      __ addptr(rcx, rax);
      __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
                rcx);
      __ jmp(dispatch);
    }

    if (UseOnStackReplacement) {
      // invocation counter overflow
      __ bind(backedge_counter_overflow);
      __ negptr(rdx);      // rdx held the (negative) displacement;
      __ addptr(rdx, r13); // branch bcp
      // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
      __ call_VM(noreg,
                 CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::frequency_counter_overflow),
                 rdx);
      __ load_unsigned_byte(rbx, Address(r13, 0));  // restore target bytecode

      // rax: osr nmethod (osr ok) or NULL (osr not possible)
      // ebx: target bytecode
      // rdx: scratch
      // r14: locals pointer
      // r13: bcp
      __ testptr(rax, rax);                      // test result
      __ jcc(Assembler::zero, dispatch);         // no osr if null
      // nmethod may have been invalidated (VM may block upon call_VM return)
      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
      __ cmpl(rcx, InvalidOSREntryBci);
      __ jcc(Assembler::equal, dispatch);

      // We have the address of an on stack replacement routine in eax
      // We need to prepare to execute the OSR method. First we must
      // migrate the locals and monitors off of the stack.

      __ mov(r13, rax);                             // save the nmethod

      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));

      // eax is OSR buffer, move it to expected parameter location
      __ mov(j_rarg0, rax);

      // We use j_rarg definitions here so that registers don't conflict as parameter
      // registers change across platforms as we are in the midst of a calling
      // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.

      const Register retaddr = j_rarg2;
      const Register sender_sp = j_rarg1;

      // pop the interpreter frame
      __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
      __ leave();                                // remove frame anchor
      __ pop(retaddr);                           // get return address
      __ mov(rsp, sender_sp);                    // set sp to sender sp
      // Ensure compiled code always sees stack at proper alignment
      __ andptr(rsp, -(StackAlignmentInBytes));

      // unlike x86 we need no specialized return from compiled code
      // to the interpreter or the call stub.

      // push the return address
      __ push(retaddr);

      // and begin the OSR nmethod
      __ jmp(Address(r13, nmethod::osr_entry_point_offset()));
    }
  }
}
1777 | |
1778 | |
1779 void TemplateTable::if_0cmp(Condition cc) { | |
1780 transition(itos, vtos); | |
1781 // assume branch is more often taken than not (loops use backward branches) | |
1782 Label not_taken; | |
1783 __ testl(rax, rax); | |
1784 __ jcc(j_not(cc), not_taken); | |
1785 branch(false, false); | |
1786 __ bind(not_taken); | |
1787 __ profile_not_taken_branch(rax); | |
1788 } | |
1789 | |
1790 void TemplateTable::if_icmp(Condition cc) { | |
1791 transition(itos, vtos); | |
1792 // assume branch is more often taken than not (loops use backward branches) | |
1793 Label not_taken; | |
1794 __ pop_i(rdx); | |
1795 __ cmpl(rdx, rax); | |
1796 __ jcc(j_not(cc), not_taken); | |
1797 branch(false, false); | |
1798 __ bind(not_taken); | |
1799 __ profile_not_taken_branch(rax); | |
1800 } | |
1801 | |
1802 void TemplateTable::if_nullcmp(Condition cc) { | |
1803 transition(atos, vtos); | |
1804 // assume branch is more often taken than not (loops use backward branches) | |
1805 Label not_taken; | |
304 | 1806 __ testptr(rax, rax); |
0 | 1807 __ jcc(j_not(cc), not_taken); |
1808 branch(false, false); | |
1809 __ bind(not_taken); | |
1810 __ profile_not_taken_branch(rax); | |
1811 } | |
1812 | |
1813 void TemplateTable::if_acmp(Condition cc) { | |
1814 transition(atos, vtos); | |
1815 // assume branch is more often taken than not (loops use backward branches) | |
1816 Label not_taken; | |
1817 __ pop_ptr(rdx); | |
304 | 1818 __ cmpptr(rdx, rax); |
0 | 1819 __ jcc(j_not(cc), not_taken); |
1820 branch(false, false); | |
1821 __ bind(not_taken); | |
1822 __ profile_not_taken_branch(rax); | |
1823 } | |
1824 | |
void TemplateTable::ret() {
  // Return from a subroutine entered via jsr: reload the bcp (r13) from
  // the return bci stored in the given local variable.
  transition(vtos, vtos);
  locals_index(rbx);
  __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1,
                      constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}
1836 | |
void TemplateTable::wide_ret() {
  // Wide variant of ret: the local variable holding the return bci is
  // addressed via a wide (two-byte) index.
  transition(vtos, vtos);
  locals_index_wide(rbx);
  __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
  __ profile_ret(rbx, rcx);
  __ get_method(rax);
  __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
  __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
  __ dispatch_next(vtos);
}
1847 | |
void TemplateTable::tableswitch() {
  // Implements the tableswitch bytecode: the int key in rax indexes a
  // jump table of big-endian 4-byte offsets that follows the 4-byte
  // aligned default offset and lo/hi bounds in the bytecode stream.
  Label default_case, continue_execution;
  transition(itos, vtos);
  // align r13: the table starts at the next 4-byte boundary after the opcode
  __ lea(rbx, at_bcp(BytesPerInt));
  __ andptr(rbx, -BytesPerInt);
  // load lo & hi
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ movl(rdx, Address(rbx, 2 * BytesPerInt));
  __ bswapl(rcx); // table entries are big-endian
  __ bswapl(rdx);
  // check against lo & hi
  __ cmpl(rax, rcx);
  __ jcc(Assembler::less, default_case);
  __ cmpl(rax, rdx);
  __ jcc(Assembler::greater, default_case);
  // lookup dispatch offset
  __ subl(rax, rcx);
  __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
  __ profile_switch_case(rax, rbx, rcx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx); // sign-extend the branch offset
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0)); // default offset is the first table word
  __ jmp(continue_execution);
}
1881 | |
void TemplateTable::lookupswitch() {
  // Never executed: the rewriter replaces every lookupswitch with either
  // fast_linearswitch or fast_binaryswitch before this template could run.
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}
1886 | |
void TemplateTable::fast_linearswitch() {
  // Rewritten lookupswitch with few cases: linear scan over the
  // (match, offset) pairs. The key is byte-swapped once up front so the
  // big-endian table entries can be compared without swapping each one.
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // bswap rax so we can avoid bswapping the table entries
  __ bswapl(rax);
  // align r13
  __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
                                    // this instruction (change offsets
                                    // below)
  __ andptr(rbx, -BytesPerInt);
  // set counter (number of pairs, used as a descending index)
  __ movl(rcx, Address(rbx, BytesPerInt));
  __ bswapl(rcx);
  __ jmpb(loop_entry);
  // table search
  __ bind(loop);
  __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
  __ jcc(Assembler::equal, found);
  __ bind(loop_entry);
  __ decrementl(rcx);
  __ jcc(Assembler::greaterEqual, loop);
  // default case
  __ profile_switch_default(rax);
  __ movl(rdx, Address(rbx, 0)); // default offset is the first table word
  __ jmp(continue_execution);
  // entry found -> get offset
  __ bind(found);
  __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
  __ profile_switch_case(rcx, rax, rbx);
  // continue execution
  __ bind(continue_execution);
  __ bswapl(rdx);
  __ movl2ptr(rdx, rdx); // sign-extend the branch offset
  __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
  __ addptr(r13, rdx);
  __ dispatch_only(vtos);
}
1924 | |
void TemplateTable::fast_binaryswitch() {
  // Rewritten lookupswitch with many cases: binary search over the
  // sorted (match, offset) pair table.
  transition(itos, vtos);
  // Implementation using the following core algorithm:
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (inexisting)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation
  const Register key = rax; // already set (tosca)
  const Register array = rbx;
  const Register i = rcx;
  const Register j = rdx;
  const Register h = rdi;
  const Register temp = rsi;

  // Find array start
  __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
                                          // get rid of this
                                          // instruction (change
                                          // offsets below)
  __ andptr(array, -BytesPerInt);

  // Initialize i & j
  __ xorl(i, i);                            // i = 0;
  __ movl(j, Address(array, -BytesPerInt)); // j = length(array);

  // Convert j into native byteordering
  __ bswapl(j);

  // And start
  Label entry;
  __ jmp(entry);

  // binary search loop
  {
    Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
    __ sarl(h, 1);                               // h = (i + j) >> 1;
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    // Convert array[h].match to native byte-ordering before compare
    __ movl(temp, Address(array, h, Address::times_8));
    __ bswapl(temp);
    __ cmpl(key, temp);
    // j = h if (key < array[h].fast_match())
    __ cmovl(Assembler::less, j, h);
    // i = h if (key >= array[h].fast_match())
    __ cmovl(Assembler::greaterEqual, i, h);
    // while (i+1 < j)
    __ bind(entry);
    __ leal(h, Address(i, 1)); // i+1
    __ cmpl(h, j);             // i+1 < j
    __ jcc(Assembler::less, loop);
  }

  // end of binary search, result index is i (must check again!)
  Label default_case;
  // Convert array[i].match to native byte-ordering before compare
  __ movl(temp, Address(array, i, Address::times_8));
  __ bswapl(temp);
  __ cmpl(key, temp);
  __ jcc(Assembler::notEqual, default_case);

  // entry found -> j = offset
  __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
  __ profile_switch_case(i, key, array);
  __ bswapl(j);
  __ movl2ptr(j, j); // sign-extend the branch offset
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);

  // default case -> j = default offset
  __ bind(default_case);
  __ profile_switch_default(i);
  __ movl(j, Address(array, -2 * BytesPerInt));
  __ bswapl(j);
  __ movl2ptr(j, j);
  __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
  __ addptr(r13, j);
  __ dispatch_only(vtos);
}
2032 | |
2033 | |
void TemplateTable::_return(TosState state) {
  // Common code for all return bytecodes: for return_register_finalizer,
  // first registers the receiver for finalization if its class has a
  // finalizer; then removes the activation and jumps to the return address.
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
    assert(state == vtos, "only valid state");
    __ movptr(c_rarg1, aaddress(0));    // receiver is in local 0
    __ load_klass(rdi, c_rarg1);
    __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
    __ testl(rdi, JVM_ACC_HAS_FINALIZER);
    Label skip_register_finalizer;
    __ jcc(Assembler::zero, skip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1);

    __ bind(skip_register_finalizer);
  }

  __ remove_activation(state, r13); // r13 receives the return address
  __ jmp(r13);
}
2056 | |
2057 // ---------------------------------------------------------------------------- | |
2058 // Volatile variables demand their effects be made known to all CPU's | |
2059 // in order. Store buffers on most chips allow reads & writes to | |
2060 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode | |
2061 // without some kind of memory barrier (i.e., it's not sufficient that | |
2062 // the interpreter does not reorder volatile references, the hardware | |
2063 // also must not reorder them). | |
2064 // | |
2065 // According to the new Java Memory Model (JMM): | |
2066 // (1) All volatiles are serialized wrt to each other. ALSO reads & | |
2067 // writes act as aquire & release, so: | |
2068 // (2) A read cannot let unrelated NON-volatile memory refs that | |
2069 // happen after the read float up to before the read. It's OK for | |
2070 // non-volatile memory refs that happen before the volatile read to | |
2071 // float down below it. | |
2072 // (3) Similar a volatile write cannot let unrelated NON-volatile | |
2073 // memory refs that happen BEFORE the write float down to after the | |
2074 // write. It's OK for non-volatile memory refs that happen after the | |
2075 // volatile write to float up before it. | |
2076 // | |
2077 // We only put in barriers around volatile refs (they are expensive), | |
2078 // not _between_ memory refs (that would require us to track the | |
2079 // flavor of the previous memory refs). Requirements (2) and (3) | |
2080 // require some barriers before volatile stores and after volatile | |
2081 // loads. These nearly cover requirement (1) but miss the | |
2082 // volatile-store-volatile-load case. This final case is placed after | |
2083 // volatile-stores although it could just as well go before | |
2084 // volatile-loads. | |
// Emits a memory barrier with the given ordering constraint; see the
// volatile-semantics discussion above for when barriers are required.
void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits
                                     order_constraint) {
  // Helper function to insert a is-volatile test and memory barrier
  if (os::is_MP()) { // Not needed on single CPU
    __ membar(order_constraint);
  }
}
2092 | |
1565 | 2093 void TemplateTable::resolve_cache_and_index(int byte_no, |
2094 Register result, | |
2095 Register Rcache, | |
2096 Register index, | |
2097 size_t index_size) { | |
0 | 2098 const Register temp = rbx; |
1565 | 2099 assert_different_registers(result, Rcache, index, temp); |
2100 | |
0 | 2101 Label resolved; |
1565 | 2102 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); |
2103 if (byte_no == f1_oop) { | |
2104 // We are resolved if the f1 field contains a non-null object (CallSite, etc.) | |
2105 // This kind of CP cache entry does not need to match the flags byte, because | |
2106 // there is a 1-1 relation between bytecode type and CP entry type. | |
2107 assert(result != noreg, ""); //else do cmpptr(Address(...), (int32_t) NULL_WORD) | |
2108 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset())); | |
2109 __ testptr(result, result); | |
1108 | 2110 __ jcc(Assembler::notEqual, resolved); |
2111 } else { | |
1565 | 2112 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); |
2113 assert(result == noreg, ""); //else change code for setting result | |
2114 const int shift_count = (1 + byte_no) * BitsPerByte; | |
1108 | 2115 __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset())); |
2116 __ shrl(temp, shift_count); | |
2117 // have we resolved this bytecode? | |
2118 __ andl(temp, 0xFF); | |
2119 __ cmpl(temp, (int) bytecode()); | |
2120 __ jcc(Assembler::equal, resolved); | |
2121 } | |
0 | 2122 |
2123 // resolve first time through | |
2124 address entry; | |
2125 switch (bytecode()) { | |
2126 case Bytecodes::_getstatic: | |
2127 case Bytecodes::_putstatic: | |
2128 case Bytecodes::_getfield: | |
2129 case Bytecodes::_putfield: | |
2130 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); | |
2131 break; | |
2132 case Bytecodes::_invokevirtual: | |
2133 case Bytecodes::_invokespecial: | |
2134 case Bytecodes::_invokestatic: | |
2135 case Bytecodes::_invokeinterface: | |
2136 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); | |
2137 break; | |
1108 | 2138 case Bytecodes::_invokedynamic: |
2139 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); | |
2140 break; | |
1602 | 2141 case Bytecodes::_fast_aldc: |
2142 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); | |
2143 break; | |
2144 case Bytecodes::_fast_aldc_w: | |
2145 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); | |
2146 break; | |
0 | 2147 default: |
2148 ShouldNotReachHere(); | |
2149 break; | |
2150 } | |
2151 __ movl(temp, (int) bytecode()); | |
2152 __ call_VM(noreg, entry, temp); | |
2153 | |
2154 // Update registers with resolved info | |
1565 | 2155 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); |
2156 if (result != noreg) | |
2157 __ movptr(result, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset())); | |
0 | 2158 __ bind(resolved); |
2159 } | |
2160 | |
// The Rcache and index registers must be set before call
// Loads the field offset into 'off' and the entry flags into 'flags' from
// the constant pool cache entry addressed by cache/index; for static
// fields also loads the entry's f1 word (the field holder) into 'obj'.
void TemplateTable::load_field_cp_cache_entry(Register obj,
                                              Register cache,
                                              Register index,
                                              Register off,
                                              Register flags,
                                              bool is_static = false) {
  assert_different_registers(cache, index, flags, off);

  ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
  // Field offset
  __ movptr(off, Address(cache, index, Address::times_8,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::f2_offset())));
  // Flags
  __ movl(flags, Address(cache, index, Address::times_8,
                         in_bytes(cp_base_offset +
                                  ConstantPoolCacheEntry::flags_offset())));

  // klass overwrite register
  if (is_static) {
    __ movptr(obj, Address(cache, index, Address::times_8,
                           in_bytes(cp_base_offset +
                                    ConstantPoolCacheEntry::f1_offset())));
  }
}
2187 | |
// Resolves (if needed) and loads the constant pool cache entry for an
// invoke bytecode: the target into 'method' (from f2 for invokevirtual,
// otherwise f1; resolved f1_oop entries go there directly), the f2 word
// into 'itable_index' if requested, and the entry's flags into 'flags'.
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register method,
                                               Register itable_index,
                                               Register flags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal, /*unused*/
                                               bool is_invokedynamic) {
  // setup registers
  const Register cache = rcx;
  const Register index = rdx;
  assert_different_registers(method, flags);
  assert_different_registers(method, cache, index);
  assert_different_registers(itable_index, flags);
  assert_different_registers(itable_index, cache, index);
  // determine constant pool cache field offsets
  const int method_offset = in_bytes(
    constantPoolCacheOopDesc::base_offset() +
      (is_invokevirtual
       ? ConstantPoolCacheEntry::f2_offset()
       : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  // access constant pool cache fields
  const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() +
                                    ConstantPoolCacheEntry::f2_offset());

  if (byte_no == f1_oop) {
    // Resolved f1_oop goes directly into 'method' register.
    assert(is_invokedynamic, "");
    resolve_cache_and_index(byte_no, method, cache, index, sizeof(u4));
  } else {
    resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2));
    __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
  }
  if (itable_index != noreg) {
    __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
  }
  __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
}
2227 | |
2228 | |
// The registers cache and index expected to be set before call.
// Correct values of the cache and index registers are preserved.
// Posts a JVMTI field-access event to the VM when a field access watch
// has been set; otherwise falls through with no effect.
void TemplateTable::jvmti_post_field_access(Register cache, Register index,
                                            bool is_static, bool has_tos) {
  // do the JVMTI work here to avoid disturbing the register state below
  // We use c_rarg registers here because we want to use the register used in
  // the call to the VM
  if (JvmtiExport::can_post_field_access()) {
    // Check to see if a field access watch has been set before we
    // take the time to call into the VM.
    Label L1;
    assert_different_registers(cache, index, rax);
    __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
    __ testl(rax, rax);
    __ jcc(Assembler::zero, L1);

    __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);

    // cache entry pointer
    __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
    __ shll(c_rarg3, LogBytesPerWord);
    __ addptr(c_rarg2, c_rarg3);
    if (is_static) {
      __ xorl(c_rarg1, c_rarg1); // NULL object reference
    } else {
      __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
      __ verify_oop(c_rarg1);
    }
    // c_rarg1: object pointer or NULL
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                       InterpreterRuntime::post_field_access),
               c_rarg1, c_rarg2, c_rarg3);
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ bind(L1);
  }
}
2267 | |
// Pops the receiver object for a field access into 'r' and null-checks it.
void TemplateTable::pop_and_check_object(Register r) {
  __ pop_ptr(r);
  __ null_check(r); // for field access must check obj.
  __ verify_oop(r);
}
2273 | |
2274 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { | |
2275 transition(vtos, vtos); | |
2276 | |
2277 const Register cache = rcx; | |
2278 const Register index = rdx; | |
2279 const Register obj = c_rarg3; | |
2280 const Register off = rbx; | |
2281 const Register flags = rax; | |
2282 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them | |
2283 | |
1565 | 2284 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); |
0 | 2285 jvmti_post_field_access(cache, index, is_static, false); |
2286 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); | |
2287 | |
2288 if (!is_static) { | |
2289 // obj is on the stack | |
2290 pop_and_check_object(obj); | |
2291 } | |
2292 | |
2293 const Address field(obj, off, Address::times_1); | |
2294 | |
2295 Label Done, notByte, notInt, notShort, notChar, | |
2296 notLong, notFloat, notObj, notDouble; | |
2297 | |
2298 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2299 assert(btos == 0, "change code, btos != 0"); | |
2300 | |
2301 __ andl(flags, 0x0F); | |
2302 __ jcc(Assembler::notZero, notByte); | |
2303 // btos | |
2304 __ load_signed_byte(rax, field); | |
2305 __ push(btos); | |
2306 // Rewrite bytecode to be faster | |
2307 if (!is_static) { | |
2308 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx); | |
2309 } | |
2310 __ jmp(Done); | |
2311 | |
2312 __ bind(notByte); | |
2313 __ cmpl(flags, atos); | |
2314 __ jcc(Assembler::notEqual, notObj); | |
2315 // atos | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
2316 __ load_heap_oop(rax, field); |
0 | 2317 __ push(atos); |
2318 if (!is_static) { | |
2319 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); | |
2320 } | |
2321 __ jmp(Done); | |
2322 | |
2323 __ bind(notObj); | |
2324 __ cmpl(flags, itos); | |
2325 __ jcc(Assembler::notEqual, notInt); | |
2326 // itos | |
2327 __ movl(rax, field); | |
2328 __ push(itos); | |
2329 // Rewrite bytecode to be faster | |
2330 if (!is_static) { | |
2331 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx); | |
2332 } | |
2333 __ jmp(Done); | |
2334 | |
2335 __ bind(notInt); | |
2336 __ cmpl(flags, ctos); | |
2337 __ jcc(Assembler::notEqual, notChar); | |
2338 // ctos | |
622
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
605
diff
changeset
|
2339 __ load_unsigned_short(rax, field); |
0 | 2340 __ push(ctos); |
2341 // Rewrite bytecode to be faster | |
2342 if (!is_static) { | |
2343 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx); | |
2344 } | |
2345 __ jmp(Done); | |
2346 | |
2347 __ bind(notChar); | |
2348 __ cmpl(flags, stos); | |
2349 __ jcc(Assembler::notEqual, notShort); | |
2350 // stos | |
622
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
605
diff
changeset
|
2351 __ load_signed_short(rax, field); |
0 | 2352 __ push(stos); |
2353 // Rewrite bytecode to be faster | |
2354 if (!is_static) { | |
2355 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx); | |
2356 } | |
2357 __ jmp(Done); | |
2358 | |
2359 __ bind(notShort); | |
2360 __ cmpl(flags, ltos); | |
2361 __ jcc(Assembler::notEqual, notLong); | |
2362 // ltos | |
2363 __ movq(rax, field); | |
2364 __ push(ltos); | |
2365 // Rewrite bytecode to be faster | |
2366 if (!is_static) { | |
2367 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx); | |
2368 } | |
2369 __ jmp(Done); | |
2370 | |
2371 __ bind(notLong); | |
2372 __ cmpl(flags, ftos); | |
2373 __ jcc(Assembler::notEqual, notFloat); | |
2374 // ftos | |
2375 __ movflt(xmm0, field); | |
2376 __ push(ftos); | |
2377 // Rewrite bytecode to be faster | |
2378 if (!is_static) { | |
2379 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx); | |
2380 } | |
2381 __ jmp(Done); | |
2382 | |
2383 __ bind(notFloat); | |
2384 #ifdef ASSERT | |
2385 __ cmpl(flags, dtos); | |
2386 __ jcc(Assembler::notEqual, notDouble); | |
2387 #endif | |
2388 // dtos | |
2389 __ movdbl(xmm0, field); | |
2390 __ push(dtos); | |
2391 // Rewrite bytecode to be faster | |
2392 if (!is_static) { | |
2393 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx); | |
2394 } | |
2395 #ifdef ASSERT | |
2396 __ jmp(Done); | |
2397 | |
2398 __ bind(notDouble); | |
2399 __ stop("Bad state"); | |
2400 #endif | |
2401 | |
2402 __ bind(Done); | |
2403 // [jk] not needed currently | |
2404 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad | | |
2405 // Assembler::LoadStore)); | |
2406 } | |
2407 | |
2408 | |
2409 void TemplateTable::getfield(int byte_no) { | |
2410 getfield_or_static(byte_no, false); | |
2411 } | |
2412 | |
2413 void TemplateTable::getstatic(int byte_no) { | |
2414 getfield_or_static(byte_no, true); | |
2415 } | |
2416 | |
2417 // The registers cache and index expected to be set before call. | |
2418 // The function may destroy various registers, just not the cache and index registers. | |
2419 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { | |
2420 transition(vtos, vtos); | |
2421 | |
2422 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); | |
2423 | |
2424 if (JvmtiExport::can_post_field_modification()) { | |
2425 // Check to see if a field modification watch has been set before | |
2426 // we take the time to call into the VM. | |
2427 Label L1; | |
2428 assert_different_registers(cache, index, rax); | |
2429 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); | |
2430 __ testl(rax, rax); | |
2431 __ jcc(Assembler::zero, L1); | |
2432 | |
2433 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1); | |
2434 | |
2435 if (is_static) { | |
2436 // Life is simple. Null out the object pointer. | |
2437 __ xorl(c_rarg1, c_rarg1); | |
2438 } else { | |
2439 // Life is harder. The stack holds the value on top, followed by | |
2440 // the object. We don't know the size of the value, though; it | |
2441 // could be one or two words depending on its type. As a result, | |
2442 // we must find the type to determine where the object is. | |
2443 __ movl(c_rarg3, Address(c_rarg2, rscratch1, | |
2444 Address::times_8, | |
2445 in_bytes(cp_base_offset + | |
2446 ConstantPoolCacheEntry::flags_offset()))); | |
2447 __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits); | |
2448 // Make sure we don't need to mask rcx for tosBits after the | |
2449 // above shift | |
2450 ConstantPoolCacheEntry::verify_tosBits(); | |
304 | 2451 __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue |
0 | 2452 __ cmpl(c_rarg3, ltos); |
304 | 2453 __ cmovptr(Assembler::equal, |
2454 c_rarg1, at_tos_p2()); // ltos (two word jvalue) | |
0 | 2455 __ cmpl(c_rarg3, dtos); |
304 | 2456 __ cmovptr(Assembler::equal, |
2457 c_rarg1, at_tos_p2()); // dtos (two word jvalue) | |
0 | 2458 } |
2459 // cache entry pointer | |
304 | 2460 __ addptr(c_rarg2, in_bytes(cp_base_offset)); |
0 | 2461 __ shll(rscratch1, LogBytesPerWord); |
304 | 2462 __ addptr(c_rarg2, rscratch1); |
0 | 2463 // object (tos) |
304 | 2464 __ mov(c_rarg3, rsp); |
0 | 2465 // c_rarg1: object pointer set up above (NULL if static) |
2466 // c_rarg2: cache entry pointer | |
2467 // c_rarg3: jvalue object on the stack | |
2468 __ call_VM(noreg, | |
2469 CAST_FROM_FN_PTR(address, | |
2470 InterpreterRuntime::post_field_modification), | |
2471 c_rarg1, c_rarg2, c_rarg3); | |
2472 __ get_cache_and_index_at_bcp(cache, index, 1); | |
2473 __ bind(L1); | |
2474 } | |
2475 } | |
2476 | |
2477 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { | |
2478 transition(vtos, vtos); | |
2479 | |
2480 const Register cache = rcx; | |
2481 const Register index = rdx; | |
2482 const Register obj = rcx; | |
2483 const Register off = rbx; | |
2484 const Register flags = rax; | |
2485 const Register bc = c_rarg3; | |
2486 | |
1565 | 2487 resolve_cache_and_index(byte_no, noreg, cache, index, sizeof(u2)); |
0 | 2488 jvmti_post_field_mod(cache, index, is_static); |
2489 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); | |
2490 | |
2491 // [jk] not needed currently | |
2492 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | | |
2493 // Assembler::StoreStore)); | |
2494 | |
2495 Label notVolatile, Done; | |
2496 __ movl(rdx, flags); | |
2497 __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2498 __ andl(rdx, 0x1); | |
2499 | |
2500 // field address | |
2501 const Address field(obj, off, Address::times_1); | |
2502 | |
2503 Label notByte, notInt, notShort, notChar, | |
2504 notLong, notFloat, notObj, notDouble; | |
2505 | |
2506 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2507 | |
2508 assert(btos == 0, "change code, btos != 0"); | |
2509 __ andl(flags, 0x0f); | |
2510 __ jcc(Assembler::notZero, notByte); | |
2511 // btos | |
2512 __ pop(btos); | |
2513 if (!is_static) pop_and_check_object(obj); | |
2514 __ movb(field, rax); | |
2515 if (!is_static) { | |
2516 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx); | |
2517 } | |
2518 __ jmp(Done); | |
2519 | |
2520 __ bind(notByte); | |
2521 __ cmpl(flags, atos); | |
2522 __ jcc(Assembler::notEqual, notObj); | |
2523 // atos | |
2524 __ pop(atos); | |
2525 if (!is_static) pop_and_check_object(obj); | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
2526 |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
2527 // Store into the field |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
2528 do_oop_store(_masm, field, rax, _bs->kind(), false); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
2529 |
0 | 2530 if (!is_static) { |
2531 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx); | |
2532 } | |
2533 __ jmp(Done); | |
2534 | |
2535 __ bind(notObj); | |
2536 __ cmpl(flags, itos); | |
2537 __ jcc(Assembler::notEqual, notInt); | |
2538 // itos | |
2539 __ pop(itos); | |
2540 if (!is_static) pop_and_check_object(obj); | |
2541 __ movl(field, rax); | |
2542 if (!is_static) { | |
2543 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx); | |
2544 } | |
2545 __ jmp(Done); | |
2546 | |
2547 __ bind(notInt); | |
2548 __ cmpl(flags, ctos); | |
2549 __ jcc(Assembler::notEqual, notChar); | |
2550 // ctos | |
2551 __ pop(ctos); | |
2552 if (!is_static) pop_and_check_object(obj); | |
2553 __ movw(field, rax); | |
2554 if (!is_static) { | |
2555 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx); | |
2556 } | |
2557 __ jmp(Done); | |
2558 | |
2559 __ bind(notChar); | |
2560 __ cmpl(flags, stos); | |
2561 __ jcc(Assembler::notEqual, notShort); | |
2562 // stos | |
2563 __ pop(stos); | |
2564 if (!is_static) pop_and_check_object(obj); | |
2565 __ movw(field, rax); | |
2566 if (!is_static) { | |
2567 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx); | |
2568 } | |
2569 __ jmp(Done); | |
2570 | |
2571 __ bind(notShort); | |
2572 __ cmpl(flags, ltos); | |
2573 __ jcc(Assembler::notEqual, notLong); | |
2574 // ltos | |
2575 __ pop(ltos); | |
2576 if (!is_static) pop_and_check_object(obj); | |
2577 __ movq(field, rax); | |
2578 if (!is_static) { | |
2579 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx); | |
2580 } | |
2581 __ jmp(Done); | |
2582 | |
2583 __ bind(notLong); | |
2584 __ cmpl(flags, ftos); | |
2585 __ jcc(Assembler::notEqual, notFloat); | |
2586 // ftos | |
2587 __ pop(ftos); | |
2588 if (!is_static) pop_and_check_object(obj); | |
2589 __ movflt(field, xmm0); | |
2590 if (!is_static) { | |
2591 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx); | |
2592 } | |
2593 __ jmp(Done); | |
2594 | |
2595 __ bind(notFloat); | |
2596 #ifdef ASSERT | |
2597 __ cmpl(flags, dtos); | |
2598 __ jcc(Assembler::notEqual, notDouble); | |
2599 #endif | |
2600 // dtos | |
2601 __ pop(dtos); | |
2602 if (!is_static) pop_and_check_object(obj); | |
2603 __ movdbl(field, xmm0); | |
2604 if (!is_static) { | |
2605 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx); | |
2606 } | |
2607 | |
2608 #ifdef ASSERT | |
2609 __ jmp(Done); | |
2610 | |
2611 __ bind(notDouble); | |
2612 __ stop("Bad state"); | |
2613 #endif | |
2614 | |
2615 __ bind(Done); | |
2616 // Check for volatile store | |
2617 __ testl(rdx, rdx); | |
2618 __ jcc(Assembler::zero, notVolatile); | |
2619 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | | |
2620 Assembler::StoreStore)); | |
2621 | |
2622 __ bind(notVolatile); | |
2623 } | |
2624 | |
2625 void TemplateTable::putfield(int byte_no) { | |
2626 putfield_or_static(byte_no, false); | |
2627 } | |
2628 | |
2629 void TemplateTable::putstatic(int byte_no) { | |
2630 putfield_or_static(byte_no, true); | |
2631 } | |
2632 | |
2633 void TemplateTable::jvmti_post_fast_field_mod() { | |
2634 if (JvmtiExport::can_post_field_modification()) { | |
2635 // Check to see if a field modification watch has been set before | |
2636 // we take the time to call into the VM. | |
2637 Label L2; | |
2638 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); | |
2639 __ testl(c_rarg3, c_rarg3); | |
2640 __ jcc(Assembler::zero, L2); | |
2641 __ pop_ptr(rbx); // copy the object pointer from tos | |
2642 __ verify_oop(rbx); | |
2643 __ push_ptr(rbx); // put the object pointer back on tos | |
304 | 2644 __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object |
2645 __ mov(c_rarg3, rsp); | |
0 | 2646 const Address field(c_rarg3, 0); |
2647 | |
2648 switch (bytecode()) { // load values into the jvalue object | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
2649 case Bytecodes::_fast_aputfield: __ movq(field, rax); break; |
0 | 2650 case Bytecodes::_fast_lputfield: __ movq(field, rax); break; |
2651 case Bytecodes::_fast_iputfield: __ movl(field, rax); break; | |
2652 case Bytecodes::_fast_bputfield: __ movb(field, rax); break; | |
2653 case Bytecodes::_fast_sputfield: // fall through | |
2654 case Bytecodes::_fast_cputfield: __ movw(field, rax); break; | |
2655 case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break; | |
2656 case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break; | |
2657 default: | |
2658 ShouldNotReachHere(); | |
2659 } | |
2660 | |
2661 // Save rax because call_VM() will clobber it, then use it for | |
2662 // JVMTI purposes | |
304 | 2663 __ push(rax); |
0 | 2664 // access constant pool cache entry |
2665 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1); | |
2666 __ verify_oop(rbx); | |
2667 // rbx: object pointer copied above | |
2668 // c_rarg2: cache entry pointer | |
2669 // c_rarg3: jvalue object on the stack | |
2670 __ call_VM(noreg, | |
2671 CAST_FROM_FN_PTR(address, | |
2672 InterpreterRuntime::post_field_modification), | |
2673 rbx, c_rarg2, c_rarg3); | |
304 | 2674 __ pop(rax); // restore lower value |
2675 __ addptr(rsp, sizeof(jvalue)); // release jvalue object space | |
0 | 2676 __ bind(L2); |
2677 } | |
2678 } | |
2679 | |
2680 void TemplateTable::fast_storefield(TosState state) { | |
2681 transition(state, vtos); | |
2682 | |
2683 ByteSize base = constantPoolCacheOopDesc::base_offset(); | |
2684 | |
2685 jvmti_post_fast_field_mod(); | |
2686 | |
2687 // access constant pool cache | |
2688 __ get_cache_and_index_at_bcp(rcx, rbx, 1); | |
2689 | |
2690 // test for volatile with rdx | |
2691 __ movl(rdx, Address(rcx, rbx, Address::times_8, | |
2692 in_bytes(base + | |
2693 ConstantPoolCacheEntry::flags_offset()))); | |
2694 | |
2695 // replace index with field offset from cache entry | |
304 | 2696 __ movptr(rbx, Address(rcx, rbx, Address::times_8, |
2697 in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); | |
0 | 2698 |
2699 // [jk] not needed currently | |
2700 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | | |
2701 // Assembler::StoreStore)); | |
2702 | |
2703 Label notVolatile; | |
2704 __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2705 __ andl(rdx, 0x1); | |
2706 | |
2707 // Get object from stack | |
2708 pop_and_check_object(rcx); | |
2709 | |
2710 // field address | |
2711 const Address field(rcx, rbx, Address::times_1); | |
2712 | |
2713 // access field | |
2714 switch (bytecode()) { | |
2715 case Bytecodes::_fast_aputfield: | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
2716 do_oop_store(_masm, field, rax, _bs->kind(), false); |
0 | 2717 break; |
2718 case Bytecodes::_fast_lputfield: | |
2719 __ movq(field, rax); | |
2720 break; | |
2721 case Bytecodes::_fast_iputfield: | |
2722 __ movl(field, rax); | |
2723 break; | |
2724 case Bytecodes::_fast_bputfield: | |
2725 __ movb(field, rax); | |
2726 break; | |
2727 case Bytecodes::_fast_sputfield: | |
2728 // fall through | |
2729 case Bytecodes::_fast_cputfield: | |
2730 __ movw(field, rax); | |
2731 break; | |
2732 case Bytecodes::_fast_fputfield: | |
2733 __ movflt(field, xmm0); | |
2734 break; | |
2735 case Bytecodes::_fast_dputfield: | |
2736 __ movdbl(field, xmm0); | |
2737 break; | |
2738 default: | |
2739 ShouldNotReachHere(); | |
2740 } | |
2741 | |
2742 // Check for volatile store | |
2743 __ testl(rdx, rdx); | |
2744 __ jcc(Assembler::zero, notVolatile); | |
2745 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | | |
2746 Assembler::StoreStore)); | |
2747 __ bind(notVolatile); | |
2748 } | |
2749 | |
2750 | |
2751 void TemplateTable::fast_accessfield(TosState state) { | |
2752 transition(atos, state); | |
2753 | |
2754 // Do the JVMTI work here to avoid disturbing the register state below | |
2755 if (JvmtiExport::can_post_field_access()) { | |
2756 // Check to see if a field access watch has been set before we | |
2757 // take the time to call into the VM. | |
2758 Label L1; | |
2759 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); | |
2760 __ testl(rcx, rcx); | |
2761 __ jcc(Assembler::zero, L1); | |
2762 // access constant pool cache entry | |
2763 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
2764 __ verify_oop(rax); |
304 | 2765 __ mov(r12, rax); // save object pointer before call_VM() clobbers it |
2766 __ mov(c_rarg1, rax); | |
0 | 2767 // c_rarg1: object pointer copied above |
2768 // c_rarg2: cache entry pointer | |
2769 __ call_VM(noreg, | |
2770 CAST_FROM_FN_PTR(address, | |
2771 InterpreterRuntime::post_field_access), | |
2772 c_rarg1, c_rarg2); | |
304 | 2773 __ mov(rax, r12); // restore object pointer |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
2774 __ reinit_heapbase(); |
0 | 2775 __ bind(L1); |
2776 } | |
2777 | |
2778 // access constant pool cache | |
2779 __ get_cache_and_index_at_bcp(rcx, rbx, 1); | |
2780 // replace index with field offset from cache entry | |
2781 // [jk] not needed currently | |
2782 // if (os::is_MP()) { | |
2783 // __ movl(rdx, Address(rcx, rbx, Address::times_8, | |
2784 // in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2785 // ConstantPoolCacheEntry::flags_offset()))); | |
2786 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2787 // __ andl(rdx, 0x1); | |
2788 // } | |
304 | 2789 __ movptr(rbx, Address(rcx, rbx, Address::times_8, |
2790 in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2791 ConstantPoolCacheEntry::f2_offset()))); | |
0 | 2792 |
2793 // rax: object | |
2794 __ verify_oop(rax); | |
2795 __ null_check(rax); | |
2796 Address field(rax, rbx, Address::times_1); | |
2797 | |
2798 // access field | |
2799 switch (bytecode()) { | |
2800 case Bytecodes::_fast_agetfield: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
2801 __ load_heap_oop(rax, field); |
0 | 2802 __ verify_oop(rax); |
2803 break; | |
2804 case Bytecodes::_fast_lgetfield: | |
2805 __ movq(rax, field); | |
2806 break; | |
2807 case Bytecodes::_fast_igetfield: | |
2808 __ movl(rax, field); | |
2809 break; | |
2810 case Bytecodes::_fast_bgetfield: | |
2811 __ movsbl(rax, field); | |
2812 break; | |
2813 case Bytecodes::_fast_sgetfield: | |
622
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
605
diff
changeset
|
2814 __ load_signed_short(rax, field); |
0 | 2815 break; |
2816 case Bytecodes::_fast_cgetfield: | |
622
56aae7be60d4
6812678: macro assembler needs delayed binding of a few constants (for 6655638)
jrose
parents:
605
diff
changeset
|
2817 __ load_unsigned_short(rax, field); |
0 | 2818 break; |
2819 case Bytecodes::_fast_fgetfield: | |
2820 __ movflt(xmm0, field); | |
2821 break; | |
2822 case Bytecodes::_fast_dgetfield: | |
2823 __ movdbl(xmm0, field); | |
2824 break; | |
2825 default: | |
2826 ShouldNotReachHere(); | |
2827 } | |
2828 // [jk] not needed currently | |
2829 // if (os::is_MP()) { | |
2830 // Label notVolatile; | |
2831 // __ testl(rdx, rdx); | |
2832 // __ jcc(Assembler::zero, notVolatile); | |
2833 // __ membar(Assembler::LoadLoad); | |
2834 // __ bind(notVolatile); | |
2835 //}; | |
2836 } | |
2837 | |
2838 void TemplateTable::fast_xaccess(TosState state) { | |
2839 transition(vtos, state); | |
2840 | |
2841 // get receiver | |
304 | 2842 __ movptr(rax, aaddress(0)); |
0 | 2843 // access constant pool cache |
2844 __ get_cache_and_index_at_bcp(rcx, rdx, 2); | |
304 | 2845 __ movptr(rbx, |
2846 Address(rcx, rdx, Address::times_8, | |
2847 in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2848 ConstantPoolCacheEntry::f2_offset()))); | |
0 | 2849 // make sure exception is reported in correct bcp range (getfield is |
2850 // next instruction) | |
304 | 2851 __ increment(r13); |
0 | 2852 __ null_check(rax); |
2853 switch (state) { | |
2854 case itos: | |
2855 __ movl(rax, Address(rax, rbx, Address::times_1)); | |
2856 break; | |
2857 case atos: | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
2858 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1)); |
0 | 2859 __ verify_oop(rax); |
2860 break; | |
2861 case ftos: | |
2862 __ movflt(xmm0, Address(rax, rbx, Address::times_1)); | |
2863 break; | |
2864 default: | |
2865 ShouldNotReachHere(); | |
2866 } | |
2867 | |
2868 // [jk] not needed currently | |
2869 // if (os::is_MP()) { | |
2870 // Label notVolatile; | |
2871 // __ movl(rdx, Address(rcx, rdx, Address::times_8, | |
2872 // in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2873 // ConstantPoolCacheEntry::flags_offset()))); | |
2874 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2875 // __ testl(rdx, 0x1); | |
2876 // __ jcc(Assembler::zero, notVolatile); | |
2877 // __ membar(Assembler::LoadLoad); | |
2878 // __ bind(notVolatile); | |
2879 // } | |
2880 | |
304 | 2881 __ decrement(r13); |
0 | 2882 } |
2883 | |
2884 | |
2885 | |
2886 //----------------------------------------------------------------------------- | |
2887 // Calls | |
2888 | |
2889 void TemplateTable::count_calls(Register method, Register temp) { | |
2890 // implemented elsewhere | |
2891 ShouldNotReachHere(); | |
2892 } | |
2893 | |
1108 | 2894 void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) { |
0 | 2895 // determine flags |
1108 | 2896 Bytecodes::Code code = bytecode(); |
0 | 2897 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; |
1108 | 2898 const bool is_invokedynamic = code == Bytecodes::_invokedynamic; |
0 | 2899 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; |
2900 const bool is_invokespecial = code == Bytecodes::_invokespecial; | |
1108 | 2901 const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic); |
0 | 2902 const bool receiver_null_check = is_invokespecial; |
2903 const bool save_flags = is_invokeinterface || is_invokevirtual; | |
2904 // setup registers & access constant pool cache | |
2905 const Register recv = rcx; | |
2906 const Register flags = rdx; | |
2907 assert_different_registers(method, index, recv, flags); | |
2908 | |
2909 // save 'interpreter return address' | |
2910 __ save_bcp(); | |
2911 | |
1565 | 2912 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic); |
0 | 2913 |
2914 // load receiver if needed (note: no return address pushed yet) | |
2915 if (load_receiver) { | |
1565 | 2916 assert(!is_invokedynamic, ""); |
0 | 2917 __ movl(recv, flags); |
2918 __ andl(recv, 0xFF); | |
1108 | 2919 Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1)); |
1304 | 2920 __ movptr(recv, recv_addr); |
2921 __ verify_oop(recv); | |
0 | 2922 } |
2923 | |
2924 // do null check if needed | |
2925 if (receiver_null_check) { | |
2926 __ null_check(recv); | |
2927 } | |
2928 | |
2929 if (save_flags) { | |
2930 __ movl(r13, flags); | |
2931 } | |
2932 | |
2933 // compute return type | |
2934 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2935 // Make sure we don't need to mask flags for tosBits after the above shift | |
2936 ConstantPoolCacheEntry::verify_tosBits(); | |
2937 // load return address | |
2938 { | |
1108 | 2939 address table_addr; |
2940 if (is_invokeinterface || is_invokedynamic) | |
2941 table_addr = (address)Interpreter::return_5_addrs_by_index_table(); | |
2942 else | |
2943 table_addr = (address)Interpreter::return_3_addrs_by_index_table(); | |
2944 ExternalAddress table(table_addr); | |
2945 __ lea(rscratch1, table); | |
2946 __ movptr(flags, Address(rscratch1, flags, Address::times_ptr)); | |
0 | 2947 } |
2948 | |
2949 // push return address | |
304 | 2950 __ push(flags); |
0 | 2951 |
2952 // Restore flag field from the constant pool cache, and restore esi | |
2953 // for later null checks. r13 is the bytecode pointer | |
2954 if (save_flags) { | |
2955 __ movl(flags, r13); | |
2956 __ restore_bcp(); | |
2957 } | |
2958 } | |
2959 | |
2960 | |
2961 void TemplateTable::invokevirtual_helper(Register index, | |
2962 Register recv, | |
2963 Register flags) { | |
1783 | 2964 // Uses temporary registers rax, rdx |
2965 assert_different_registers(index, recv, rax, rdx); | |
0 | 2966 |
2967 // Test for an invoke of a final method | |
2968 Label notFinal; | |
2969 __ movl(rax, flags); | |
2970 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod)); | |
2971 __ jcc(Assembler::zero, notFinal); | |
2972 | |
2973 const Register method = index; // method must be rbx | |
2974 assert(method == rbx, | |
2975 "methodOop must be rbx for interpreter calling convention"); | |
2976 | |
2977 // do the call - the index is actually the method to call | |
2978 __ verify_oop(method); | |
2979 | |
2980 // It's final, need a null check here! | |
2981 __ null_check(recv); | |
2982 | |
2983 // profile this call | |
2984 __ profile_final_call(rax); | |
2985 | |
2986 __ jump_from_interpreted(method, rax); | |
2987 | |
2988 __ bind(notFinal); | |
2989 | |
2990 // get receiver klass | |
2991 __ null_check(recv, oopDesc::klass_offset_in_bytes()); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
2992 __ load_klass(rax, recv); |
0 | 2993 |
2994 __ verify_oop(rax); | |
2995 | |
2996 // profile this call | |
2997 __ profile_virtual_call(rax, r14, rdx); | |
2998 | |
2999 // get target methodOop & entry point | |
3000 const int base = instanceKlass::vtable_start_offset() * wordSize; | |
3001 assert(vtableEntry::size() * wordSize == 8, | |
3002 "adjust the scaling in the code below"); | |
304 | 3003 __ movptr(method, Address(rax, index, |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3004 Address::times_8, |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3005 base + vtableEntry::method_offset_in_bytes())); |
304 | 3006 __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset())); |
0 | 3007 __ jump_from_interpreted(method, rdx); |
3008 } | |
3009 | |
3010 | |
3011 void TemplateTable::invokevirtual(int byte_no) { | |
3012 transition(vtos, vtos); | |
1565 | 3013 assert(byte_no == f2_byte, "use this argument"); |
1108 | 3014 prepare_invoke(rbx, noreg, byte_no); |
0 | 3015 |
3016 // rbx: index | |
3017 // rcx: receiver | |
3018 // rdx: flags | |
3019 | |
3020 invokevirtual_helper(rbx, rcx, rdx); | |
3021 } | |
3022 | |
3023 | |
3024 void TemplateTable::invokespecial(int byte_no) { | |
3025 transition(vtos, vtos); | |
1565 | 3026 assert(byte_no == f1_byte, "use this argument"); |
1108 | 3027 prepare_invoke(rbx, noreg, byte_no); |
0 | 3028 // do the call |
3029 __ verify_oop(rbx); | |
3030 __ profile_call(rax); | |
3031 __ jump_from_interpreted(rbx, rax); | |
3032 } | |
3033 | |
3034 | |
3035 void TemplateTable::invokestatic(int byte_no) { | |
3036 transition(vtos, vtos); | |
1565 | 3037 assert(byte_no == f1_byte, "use this argument"); |
1108 | 3038 prepare_invoke(rbx, noreg, byte_no); |
0 | 3039 // do the call |
3040 __ verify_oop(rbx); | |
3041 __ profile_call(rax); | |
3042 __ jump_from_interpreted(rbx, rax); | |
3043 } | |
3044 | |
3045 void TemplateTable::fast_invokevfinal(int byte_no) { | |
3046 transition(vtos, vtos); | |
1565 | 3047 assert(byte_no == f2_byte, "use this argument"); |
0 | 3048 __ stop("fast_invokevfinal not used on amd64"); |
3049 } | |
3050 | |
3051 void TemplateTable::invokeinterface(int byte_no) { | |
3052 transition(vtos, vtos); | |
1565 | 3053 assert(byte_no == f1_byte, "use this argument"); |
1108 | 3054 prepare_invoke(rax, rbx, byte_no); |
0 | 3055 |
3056 // rax: Interface | |
3057 // rbx: index | |
3058 // rcx: receiver | |
3059 // rdx: flags | |
3060 | |
3061 // Special case of invokeinterface called for virtual method of | |
3062 // java.lang.Object. See cpCacheOop.cpp for details. | |
3063 // This code isn't produced by javac, but could be produced by | |
3064 // another compliant java compiler. | |
3065 Label notMethod; | |
3066 __ movl(r14, rdx); | |
3067 __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface)); | |
3068 __ jcc(Assembler::zero, notMethod); | |
3069 | |
3070 invokevirtual_helper(rbx, rcx, rdx); | |
3071 __ bind(notMethod); | |
3072 | |
3073 // Get receiver klass into rdx - also a null check | |
3074 __ restore_locals(); // restore r14 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3075 __ load_klass(rdx, rcx); |
0 | 3076 __ verify_oop(rdx); |
3077 | |
3078 // profile this call | |
3079 __ profile_virtual_call(rdx, r13, r14); | |
3080 | |
623
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3081 Label no_such_interface, no_such_method; |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3082 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3083 __ lookup_interface_method(// inputs: rec. class, interface, itable index |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3084 rdx, rax, rbx, |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3085 // outputs: method, scan temp. reg |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3086 rbx, r13, |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3087 no_such_interface); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3088 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3089 // rbx,: methodOop to call |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3090 // rcx: receiver |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3091 // Check for abstract method error |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3092 // Note: This should be done more efficiently via a throw_abstract_method_error |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3093 // interpreter entry point and a conditional jump to it in case of a null |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3094 // method. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3095 __ testptr(rbx, rbx); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3096 __ jcc(Assembler::zero, no_such_method); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3097 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3098 // do the call |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3099 // rcx: receiver |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3100 // rbx,: methodOop |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3101 __ jump_from_interpreted(rbx, rdx); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3102 __ should_not_reach_here(); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3103 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3104 // exception handling code follows... |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3105 // note: must restore interpreter registers to canonical |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3106 // state for exception handling to work correctly! |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3107 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3108 __ bind(no_such_method); |
0 | 3109 // throw exception |
623
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3110 __ pop(rbx); // pop return address (pushed by prepare_invoke) |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3111 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed) |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3112 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3113 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError)); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3114 // the call_VM checks for exception, so we should never return here. |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3115 __ should_not_reach_here(); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3116 |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3117 __ bind(no_such_interface); |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3118 // throw exception |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3119 __ pop(rbx); // pop return address (pushed by prepare_invoke) |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3120 __ restore_bcp(); // r13 must be correct for exception handler (was destroyed) |
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3121 __ restore_locals(); // make sure locals pointer is correct as well (was destroyed) |
0 | 3122 __ call_VM(noreg, CAST_FROM_FN_PTR(address, |
3123 InterpreterRuntime::throw_IncompatibleClassChangeError)); | |
3124 // the call_VM checks for exception, so we should never return here. | |
3125 __ should_not_reach_here(); | |
623
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3126 return; |
0 | 3127 } |
3128 | |
726
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3129 void TemplateTable::invokedynamic(int byte_no) { |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3130 transition(vtos, vtos); |
1565 | 3131 assert(byte_no == f1_oop, "use this argument"); |
726
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3132 |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3133 if (!EnableInvokeDynamic) { |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3134 // We should not encounter this bytecode if !EnableInvokeDynamic. |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3135 // The verifier will stop it. However, if we get past the verifier, |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3136 // this will stop the thread in a reasonable way, without crashing the JVM. |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3137 __ call_VM(noreg, CAST_FROM_FN_PTR(address, |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3138 InterpreterRuntime::throw_IncompatibleClassChangeError)); |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3139 // the call_VM checks for exception, so we should never return here. |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3140 __ should_not_reach_here(); |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3141 return; |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3142 } |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3143 |
1565 | 3144 assert(byte_no == f1_oop, "use this argument"); |
1108 | 3145 prepare_invoke(rax, rbx, byte_no); |
3146 | |
3147 // rax: CallSite object (f1) | |
3148 // rbx: unused (f2) | |
3149 // rcx: receiver address | |
3150 // rdx: flags (unused) | |
3151 | |
1846 | 3152 Register rax_callsite = rax; |
3153 Register rcx_method_handle = rcx; | |
3154 | |
1108 | 3155 if (ProfileInterpreter) { |
3156 // %%% should make a type profile for any invokedynamic that takes a ref argument | |
3157 // profile this call | |
3158 __ profile_call(r13); | |
3159 } | |
3160 | |
1846 | 3161 __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx))); |
3162 __ null_check(rcx_method_handle); | |
1108 | 3163 __ prepare_to_jump_from_interpreted(); |
1846 | 3164 __ jump_to_method_handle_entry(rcx_method_handle, rdx); |
726
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3165 } |
be93aad57795
6655646: dynamic languages need dynamically linked call sites
jrose
parents:
623
diff
changeset
|
3166 |
623
9adddb8c0fc8
6812831: factor duplicated assembly code for megamorphic invokeinterface (for 6655638)
jrose
parents:
622
diff
changeset
|
3167 |
0 | 3168 //----------------------------------------------------------------------------- |
3169 // Allocation | |
3170 | |
3171 void TemplateTable::_new() { | |
3172 transition(vtos, atos); | |
3173 __ get_unsigned_2_byte_index_at_bcp(rdx, 1); | |
3174 Label slow_case; | |
3175 Label done; | |
3176 Label initialize_header; | |
3177 Label initialize_object; // including clearing the fields | |
3178 Label allocate_shared; | |
3179 | |
3180 __ get_cpool_and_tags(rsi, rax); | |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1602
diff
changeset
|
3181 // Make sure the class we're about to instantiate has been resolved. |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1602
diff
changeset
|
3182 // This is done before loading instanceKlass to be consistent with the order |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1602
diff
changeset
|
3183 // how Constant Pool is updated (see constantPoolOopDesc::klass_at_put) |
0 | 3184 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; |
3185 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), | |
3186 JVM_CONSTANT_Class); | |
3187 __ jcc(Assembler::notEqual, slow_case); | |
3188 | |
1681
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1602
diff
changeset
|
3189 // get instanceKlass |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1602
diff
changeset
|
3190 __ movptr(rsi, Address(rsi, rdx, |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1602
diff
changeset
|
3191 Address::times_8, sizeof(constantPoolOopDesc))); |
126ea7725993
6953477: Increase portability and flexibility of building Hotspot
bobv
parents:
1602
diff
changeset
|
3192 |
0 | 3193 // make sure klass is initialized & doesn't have finalizer |
3194 // make sure klass is fully initialized | |
3195 __ cmpl(Address(rsi, | |
3196 instanceKlass::init_state_offset_in_bytes() + | |
3197 sizeof(oopDesc)), | |
3198 instanceKlass::fully_initialized); | |
3199 __ jcc(Assembler::notEqual, slow_case); | |
3200 | |
3201 // get instance_size in instanceKlass (scaled to a count of bytes) | |
3202 __ movl(rdx, | |
3203 Address(rsi, | |
3204 Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); | |
3205 // test to see if it has a finalizer or is malformed in some way | |
3206 __ testl(rdx, Klass::_lh_instance_slow_path_bit); | |
3207 __ jcc(Assembler::notZero, slow_case); | |
3208 | |
3209 // Allocate the instance | |
3210 // 1) Try to allocate in the TLAB | |
3211 // 2) if fail and the object is large allocate in the shared Eden | |
3212 // 3) if the above fails (or is not applicable), go to a slow case | |
3213 // (creates a new TLAB, etc.) | |
3214 | |
3215 const bool allow_shared_alloc = | |
3216 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; | |
3217 | |
3218 if (UseTLAB) { | |
304 | 3219 __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); |
3220 __ lea(rbx, Address(rax, rdx, Address::times_1)); | |
3221 __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); | |
0 | 3222 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case); |
304 | 3223 __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx); |
0 | 3224 if (ZeroTLAB) { |
3225 // the fields have been already cleared | |
3226 __ jmp(initialize_header); | |
3227 } else { | |
3228 // initialize both the header and fields | |
3229 __ jmp(initialize_object); | |
3230 } | |
3231 } | |
3232 | |
3233 // Allocation in the shared Eden, if allowed. | |
3234 // | |
3235 // rdx: instance size in bytes | |
3236 if (allow_shared_alloc) { | |
3237 __ bind(allocate_shared); | |
3238 | |
342
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
3239 ExternalAddress top((address)Universe::heap()->top_addr()); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
3240 ExternalAddress end((address)Universe::heap()->end_addr()); |
37f87013dfd8
6711316: Open source the Garbage-First garbage collector
ysr
parents:
113
diff
changeset
|
3241 |
0 | 3242 const Register RtopAddr = rscratch1; |
3243 const Register RendAddr = rscratch2; | |
3244 | |
3245 __ lea(RtopAddr, top); | |
3246 __ lea(RendAddr, end); | |
304 | 3247 __ movptr(rax, Address(RtopAddr, 0)); |
0 | 3248 |
3249 // For retries rax gets set by cmpxchgq | |
3250 Label retry; | |
3251 __ bind(retry); | |
304 | 3252 __ lea(rbx, Address(rax, rdx, Address::times_1)); |
3253 __ cmpptr(rbx, Address(RendAddr, 0)); | |
0 | 3254 __ jcc(Assembler::above, slow_case); |
3255 | |
3256 // Compare rax with the top addr, and if still equal, store the new | |
3257 // top addr in rbx at the address of the top addr pointer. Sets ZF if was | |
3258 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs. | |
3259 // | |
3260 // rax: object begin | |
3261 // rbx: object end | |
3262 // rdx: instance size in bytes | |
3263 if (os::is_MP()) { | |
3264 __ lock(); | |
3265 } | |
304 | 3266 __ cmpxchgptr(rbx, Address(RtopAddr, 0)); |
0 | 3267 |
3268 // if someone beat us on the allocation, try again, otherwise continue | |
3269 __ jcc(Assembler::notEqual, retry); | |
3270 } | |
3271 | |
3272 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { | |
3273 // The object is initialized before the header. If the object size is | |
3274 // zero, go directly to the header initialization. | |
3275 __ bind(initialize_object); | |
3276 __ decrementl(rdx, sizeof(oopDesc)); | |
3277 __ jcc(Assembler::zero, initialize_header); | |
3278 | |
3279 // Initialize object fields | |
3280 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) | |
3281 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop | |
3282 { | |
3283 Label loop; | |
3284 __ bind(loop); | |
3285 __ movq(Address(rax, rdx, Address::times_8, | |
3286 sizeof(oopDesc) - oopSize), | |
3287 rcx); | |
3288 __ decrementl(rdx); | |
3289 __ jcc(Assembler::notZero, loop); | |
3290 } | |
3291 | |
3292 // initialize object header only. | |
3293 __ bind(initialize_header); | |
3294 if (UseBiasedLocking) { | |
304 | 3295 __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); |
3296 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); | |
0 | 3297 } else { |
3298 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), | |
3299 (intptr_t) markOopDesc::prototype()); // header (address 0x1) | |
3300 } | |
167
feeb96a45707
6696264: assert("narrow oop can never be zero") for GCBasher & ParNewGC
coleenp
parents:
113
diff
changeset
|
3301 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) |
feeb96a45707
6696264: assert("narrow oop can never be zero") for GCBasher & ParNewGC
coleenp
parents:
113
diff
changeset
|
3302 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops |
feeb96a45707
6696264: assert("narrow oop can never be zero") for GCBasher & ParNewGC
coleenp
parents:
113
diff
changeset
|
3303 __ store_klass(rax, rsi); // store klass last |
1248
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3304 |
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3305 { |
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3306 SkipIfEqual skip(_masm, &DTraceAllocProbes, false); |
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3307 // Trigger dtrace event for fastpath |
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3308 __ push(atos); // save the return value |
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3309 __ call_VM_leaf( |
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3310 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax); |
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3311 __ pop(atos); // restore the return value |
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3312 |
455df1b81409
6587322: dtrace probe object__alloc doesn't fire in some situations on amd64
kamg
parents:
1108
diff
changeset
|
3313 } |
0 | 3314 __ jmp(done); |
3315 } | |
3316 | |
3317 | |
3318 // slow case | |
3319 __ bind(slow_case); | |
3320 __ get_constant_pool(c_rarg1); | |
3321 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); | |
3322 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2); | |
3323 __ verify_oop(rax); | |
3324 | |
3325 // continue | |
3326 __ bind(done); | |
3327 } | |
3328 | |
3329 void TemplateTable::newarray() { | |
3330 transition(itos, atos); | |
3331 __ load_unsigned_byte(c_rarg1, at_bcp(1)); | |
3332 __ movl(c_rarg2, rax); | |
3333 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), | |
3334 c_rarg1, c_rarg2); | |
3335 } | |
3336 | |
3337 void TemplateTable::anewarray() { | |
3338 transition(itos, atos); | |
3339 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); | |
3340 __ get_constant_pool(c_rarg1); | |
3341 __ movl(c_rarg3, rax); | |
3342 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), | |
3343 c_rarg1, c_rarg2, c_rarg3); | |
3344 } | |
3345 | |
3346 void TemplateTable::arraylength() { | |
3347 transition(atos, itos); | |
3348 __ null_check(rax, arrayOopDesc::length_offset_in_bytes()); | |
3349 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes())); | |
3350 } | |
3351 | |
3352 void TemplateTable::checkcast() { | |
3353 transition(atos, atos); | |
3354 Label done, is_null, ok_is_subtype, quicked, resolved; | |
304 | 3355 __ testptr(rax, rax); // object is in rax |
0 | 3356 __ jcc(Assembler::zero, is_null); |
3357 | |
3358 // Get cpool & tags index | |
3359 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array | |
3360 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index | |
3361 // See if bytecode has already been quicked | |
3362 __ cmpb(Address(rdx, rbx, | |
3363 Address::times_1, | |
3364 typeArrayOopDesc::header_size(T_BYTE) * wordSize), | |
3365 JVM_CONSTANT_Class); | |
3366 __ jcc(Assembler::equal, quicked); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3367 __ push(atos); // save receiver for result, and for GC |
304 | 3368 __ mov(r12, rcx); // save rcx XXX |
0 | 3369 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3370 __ movq(rcx, r12); // restore rcx XXX |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3371 __ reinit_heapbase(); |
0 | 3372 __ pop_ptr(rdx); // restore receiver |
3373 __ jmpb(resolved); | |
3374 | |
3375 // Get superklass in rax and subklass in rbx | |
3376 __ bind(quicked); | |
304 | 3377 __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check |
3378 __ movptr(rax, Address(rcx, rbx, | |
0 | 3379 Address::times_8, sizeof(constantPoolOopDesc))); |
3380 | |
3381 __ bind(resolved); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3382 __ load_klass(rbx, rdx); |
0 | 3383 |
3384 // Generate subtype check. Blows rcx, rdi. Object in rdx. | |
3385 // Superklass in rax. Subklass in rbx. | |
3386 __ gen_subtype_check(rbx, ok_is_subtype); | |
3387 | |
3388 // Come here on failure | |
3389 __ push_ptr(rdx); | |
3390 // object is at TOS | |
3391 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); | |
3392 | |
3393 // Come here on success | |
3394 __ bind(ok_is_subtype); | |
304 | 3395 __ mov(rax, rdx); // Restore object in rdx |
0 | 3396 |
3397 // Collect counts on whether this check-cast sees NULLs a lot or not. | |
3398 if (ProfileInterpreter) { | |
3399 __ jmp(done); | |
3400 __ bind(is_null); | |
3401 __ profile_null_seen(rcx); | |
3402 } else { | |
3403 __ bind(is_null); // same as 'done' | |
3404 } | |
3405 __ bind(done); | |
3406 } | |
3407 | |
3408 void TemplateTable::instanceof() { | |
3409 transition(atos, itos); | |
3410 Label done, is_null, ok_is_subtype, quicked, resolved; | |
304 | 3411 __ testptr(rax, rax); |
0 | 3412 __ jcc(Assembler::zero, is_null); |
3413 | |
3414 // Get cpool & tags index | |
3415 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array | |
3416 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index | |
3417 // See if bytecode has already been quicked | |
3418 __ cmpb(Address(rdx, rbx, | |
3419 Address::times_1, | |
3420 typeArrayOopDesc::header_size(T_BYTE) * wordSize), | |
3421 JVM_CONSTANT_Class); | |
3422 __ jcc(Assembler::equal, quicked); | |
3423 | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3424 __ push(atos); // save receiver for result, and for GC |
304 | 3425 __ mov(r12, rcx); // save rcx |
0 | 3426 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3427 __ movq(rcx, r12); // restore rcx |
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3428 __ reinit_heapbase(); |
0 | 3429 __ pop_ptr(rdx); // restore receiver |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3430 __ load_klass(rdx, rdx); |
0 | 3431 __ jmpb(resolved); |
3432 | |
3433 // Get superklass in rax and subklass in rdx | |
3434 __ bind(quicked); | |
113
ba764ed4b6f2
6420645: Create a vm that uses compressed oops for up to 32gb heapsizes
coleenp
parents:
0
diff
changeset
|
3435 __ load_klass(rdx, rax); |
304 | 3436 __ movptr(rax, Address(rcx, rbx, |
3437 Address::times_8, sizeof(constantPoolOopDesc))); | |
0 | 3438 |
3439 __ bind(resolved); | |
3440 | |
3441 // Generate subtype check. Blows rcx, rdi | |
3442 // Superklass in rax. Subklass in rdx. | |
3443 __ gen_subtype_check(rdx, ok_is_subtype); | |
3444 | |
3445 // Come here on failure | |
3446 __ xorl(rax, rax); | |
3447 __ jmpb(done); | |
3448 // Come here on success | |
3449 __ bind(ok_is_subtype); | |
3450 __ movl(rax, 1); | |
3451 | |
3452 // Collect counts on whether this test sees NULLs a lot or not. | |
3453 if (ProfileInterpreter) { | |
3454 __ jmp(done); | |
3455 __ bind(is_null); | |
3456 __ profile_null_seen(rcx); | |
3457 } else { | |
3458 __ bind(is_null); // same as 'done' | |
3459 } | |
3460 __ bind(done); | |
3461 // rax = 0: obj == NULL or obj is not an instanceof the specified klass | |
3462 // rax = 1: obj != NULL and obj is an instanceof the specified klass | |
3463 } | |
3464 | |
3465 //----------------------------------------------------------------------------- | |
3466 // Breakpoints | |
3467 void TemplateTable::_breakpoint() { | |
3468 // Note: We get here even if we are single stepping.. | |
3469 // jbug inists on setting breakpoints at every bytecode | |
3470 // even if we are in single step mode. | |
3471 | |
3472 transition(vtos, vtos); | |
3473 | |
3474 // get the unpatched byte code | |
3475 __ get_method(c_rarg1); | |
3476 __ call_VM(noreg, | |
3477 CAST_FROM_FN_PTR(address, | |
3478 InterpreterRuntime::get_original_bytecode_at), | |
3479 c_rarg1, r13); | |
304 | 3480 __ mov(rbx, rax); |
0 | 3481 |
3482 // post the breakpoint event | |
3483 __ get_method(c_rarg1); | |
3484 __ call_VM(noreg, | |
3485 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), | |
3486 c_rarg1, r13); | |
3487 | |
3488 // complete the execution of original bytecode | |
3489 __ dispatch_only_normal(vtos); | |
3490 } | |
3491 | |
3492 //----------------------------------------------------------------------------- | |
3493 // Exceptions | |
3494 | |
3495 void TemplateTable::athrow() { | |
3496 transition(atos, vtos); | |
3497 __ null_check(rax); | |
3498 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); | |
3499 } | |
3500 | |
3501 //----------------------------------------------------------------------------- | |
3502 // Synchronization | |
3503 // | |
3504 // Note: monitorenter & exit are symmetric routines; which is reflected | |
3505 // in the assembly code structure as well | |
3506 // | |
3507 // Stack layout: | |
3508 // | |
3509 // [expressions ] <--- rsp = expression stack top | |
3510 // .. | |
3511 // [expressions ] | |
3512 // [monitor entry] <--- monitor block top = expression stack bot | |
3513 // .. | |
3514 // [monitor entry] | |
3515 // [frame data ] <--- monitor block bot | |
3516 // ... | |
3517 // [saved rbp ] <--- rbp | |
3518 void TemplateTable::monitorenter() { | |
3519 transition(atos, vtos); | |
3520 | |
3521 // check for NULL object | |
3522 __ null_check(rax); | |
3523 | |
3524 const Address monitor_block_top( | |
3525 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); | |
3526 const Address monitor_block_bot( | |
3527 rbp, frame::interpreter_frame_initial_sp_offset * wordSize); | |
3528 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
3529 | |
3530 Label allocated; | |
3531 | |
3532 // initialize entry pointer | |
3533 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL | |
3534 | |
3535 // find a free slot in the monitor block (result in c_rarg1) | |
3536 { | |
3537 Label entry, loop, exit; | |
304 | 3538 __ movptr(c_rarg3, monitor_block_top); // points to current entry, |
0 | 3539 // starting with top-most entry |
304 | 3540 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom |
0 | 3541 // of monitor block |
3542 __ jmpb(entry); | |
3543 | |
3544 __ bind(loop); | |
3545 // check if current entry is used | |
304 | 3546 __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD); |
0 | 3547 // if not used then remember entry in c_rarg1 |
304 | 3548 __ cmov(Assembler::equal, c_rarg1, c_rarg3); |
0 | 3549 // check if current entry is for same object |
304 | 3550 __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); |
0 | 3551 // if same object then stop searching |
3552 __ jccb(Assembler::equal, exit); | |
3553 // otherwise advance to next entry | |
304 | 3554 __ addptr(c_rarg3, entry_size); |
0 | 3555 __ bind(entry); |
3556 // check if bottom reached | |
304 | 3557 __ cmpptr(c_rarg3, c_rarg2); |
0 | 3558 // if not at bottom then check this entry |
3559 __ jcc(Assembler::notEqual, loop); | |
3560 __ bind(exit); | |
3561 } | |
3562 | |
304 | 3563 __ testptr(c_rarg1, c_rarg1); // check if a slot has been found |
0 | 3564 __ jcc(Assembler::notZero, allocated); // if found, continue with that one |
3565 | |
3566 // allocate one if there's no free slot | |
3567 { | |
3568 Label entry, loop; | |
304 | 3569 // 1. compute new pointers // rsp: old expression stack top |
3570 __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom | |
3571 __ subptr(rsp, entry_size); // move expression stack top | |
3572 __ subptr(c_rarg1, entry_size); // move expression stack bottom | |
3573 __ mov(c_rarg3, rsp); // set start value for copy loop | |
3574 __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom | |
0 | 3575 __ jmp(entry); |
3576 // 2. move expression stack contents | |
3577 __ bind(loop); | |
304 | 3578 __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack |
3579 // word from old location | |
3580 __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location | |
3581 __ addptr(c_rarg3, wordSize); // advance to next word | |
0 | 3582 __ bind(entry); |
304 | 3583 __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached |
0 | 3584 __ jcc(Assembler::notEqual, loop); // if not at bottom then |
3585 // copy next word | |
3586 } | |
3587 | |
3588 // call run-time routine | |
3589 // c_rarg1: points to monitor entry | |
3590 __ bind(allocated); | |
3591 | |
3592 // Increment bcp to point to the next bytecode, so exception | |
3593 // handling for async. exceptions work correctly. | |
3594 // The object has already been poped from the stack, so the | |
3595 // expression stack looks correct. | |
304 | 3596 __ increment(r13); |
0 | 3597 |
3598 // store object | |
304 | 3599 __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax); |
0 | 3600 __ lock_object(c_rarg1); |
3601 | |
3602 // check to make sure this monitor doesn't cause stack overflow after locking | |
3603 __ save_bcp(); // in case of exception | |
3604 __ generate_stack_overflow_check(0); | |
3605 | |
3606 // The bcp has already been incremented. Just need to dispatch to | |
3607 // next instruction. | |
3608 __ dispatch_next(vtos); | |
3609 } | |
3610 | |
3611 | |
3612 void TemplateTable::monitorexit() { | |
3613 transition(atos, vtos); | |
3614 | |
3615 // check for NULL object | |
3616 __ null_check(rax); | |
3617 | |
3618 const Address monitor_block_top( | |
3619 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); | |
3620 const Address monitor_block_bot( | |
3621 rbp, frame::interpreter_frame_initial_sp_offset * wordSize); | |
3622 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
3623 | |
3624 Label found; | |
3625 | |
3626 // find matching slot | |
3627 { | |
3628 Label entry, loop; | |
304 | 3629 __ movptr(c_rarg1, monitor_block_top); // points to current entry, |
0 | 3630 // starting with top-most entry |
304 | 3631 __ lea(c_rarg2, monitor_block_bot); // points to word before bottom |
0 | 3632 // of monitor block |
3633 __ jmpb(entry); | |
3634 | |
3635 __ bind(loop); | |
3636 // check if current entry is for same object | |
304 | 3637 __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); |
0 | 3638 // if same object then stop searching |
3639 __ jcc(Assembler::equal, found); | |
3640 // otherwise advance to next entry | |
304 | 3641 __ addptr(c_rarg1, entry_size); |
0 | 3642 __ bind(entry); |
3643 // check if bottom reached | |
304 | 3644 __ cmpptr(c_rarg1, c_rarg2); |
0 | 3645 // if not at bottom then check this entry |
3646 __ jcc(Assembler::notEqual, loop); | |
3647 } | |
3648 | |
3649 // error handling. Unlocking was not block-structured | |
3650 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
3651 InterpreterRuntime::throw_illegal_monitor_state_exception)); | |
3652 __ should_not_reach_here(); | |
3653 | |
3654 // call run-time routine | |
3655 // rsi: points to monitor entry | |
3656 __ bind(found); | |
3657 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) | |
3658 __ unlock_object(c_rarg1); | |
3659 __ pop_ptr(rax); // discard object | |
3660 } | |
3661 | |
3662 | |
3663 // Wide instructions | |
3664 void TemplateTable::wide() { | |
3665 transition(vtos, vtos); | |
3666 __ load_unsigned_byte(rbx, at_bcp(1)); | |
3667 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point)); | |
3668 __ jmp(Address(rscratch1, rbx, Address::times_8)); | |
3669 // Note: the r13 increment step is part of the individual wide | |
3670 // bytecode implementations | |
3671 } | |
3672 | |
3673 | |
3674 // Multi arrays | |
3675 void TemplateTable::multianewarray() { | |
3676 transition(vtos, atos); | |
3677 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions | |
3678 // last dim is on top of stack; we want address of first one: | |
3679 // first_addr = last_addr + (ndims - 1) * wordSize | |
304 | 3680 __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize)); |
0 | 3681 call_VM(rax, |
3682 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), | |
3683 c_rarg1); | |
3684 __ load_unsigned_byte(rbx, at_bcp(3)); | |
304 | 3685 __ lea(rsp, Address(rsp, rbx, Address::times_8)); |
0 | 3686 } |
304 | 3687 #endif // !CC_INTERP |