annotate src/cpu/x86/vm/templateTable_x86_64.cpp @ 344:6aae2f9d0294
Merge
author   | ysr
date     | Thu, 12 Jun 2008 13:50:55 -0700
parents  | 37f87013dfd8 feeb96a45707
children | 1ee8caae33af
/*
 * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_templateTable_x86_64.cpp.incl"

#define __ _masm->

// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No amd64 specific initialization
}

// Address computation: local variables

static inline Address iaddress(int n) {
  return Address(r14, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::value_offset_in_bytes());
}

static inline Address laddress(Register r) {
  return Address(r14, r, Address::times_8, Interpreter::local_offset_in_bytes(1));
}

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}

static inline Address at_rsp() {
  return Address(rsp, 0);
}

// At the top of the Java expression stack, which may be different from rsp().
// It isn't, for category 1 values.
static inline Address at_tos   () {
  return Address(rsp, Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(2));
}

static inline Address at_tos_p3() {
  return Address(rsp, Interpreter::expr_offset_in_bytes(3));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
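
// Added commentary (not in the original changeset): j_not returns the
// *negated* condition because the branch templates test for the
// fall-through case and jump past the taken-branch code when the Java
// condition does not hold. Illustrative sketch:
//   __ cmpl(rdx, rax);
//   __ jcc(j_not(less), not_taken);  // if_icmplt: jcc(greaterEqual, ...)
//   ... branch-taken path ...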


// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by obj.
// If val == noreg this means store a NULL

static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address obj,
                         Register val,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // flatten object address if needed
        if (obj.index() == noreg && obj.disp() == 0) {
          if (obj.base() != rdx) {
            __ movq(rdx, obj.base());
          }
        } else {
          __ leaq(rdx, obj);
        }
        __ g1_write_barrier_pre(rdx, r8, rbx, val != noreg);
        if (val == noreg) {
          __ store_heap_oop(Address(rdx, 0), NULL_WORD);
        } else {
          __ store_heap_oop(Address(rdx, 0), val);
          __ g1_write_barrier_post(rdx, val, r8, rbx);
        }

      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (val == noreg) {
          __ store_heap_oop(obj, NULL_WORD);
        } else {
          __ store_heap_oop(obj, val);
          // flatten object address if needed
          if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
            __ store_check(obj.base());
          } else {
            __ leaq(rdx, obj);
            __ store_check(rdx);
          }
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      if (val == noreg) {
        __ store_heap_oop(obj, NULL_WORD);
      } else {
        __ store_heap_oop(obj, val);
      }
      break;
    default:
      ShouldNotReachHere();

  }
}

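// Added commentary (not in the original changeset): the "precise" flag
// controls card-marking granularity for the card-table barriers above. An
// imprecise mark dirties the card of the base object (good enough for
// instance fields), while a precise mark must dirty the card of the exact
// element address -- which is why array stores such as aastore below call
// do_oop_store(..., true).
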
Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(r13, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
                                   Register scratch,
                                   bool load_bc_into_scratch/*=true*/) {
  if (!RewriteBytecodes) {
    return;
  }
  // the pair bytecodes have already done the load.
  if (load_bc_into_scratch) {
    __ movl(bc, bytecode);
  }
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(scratch, at_bcp(0));
    __ cmpl(scratch, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, fast_patch);
    __ get_method(scratch);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::set_original_bytecode_at),
               scratch, r13, bc);
#ifndef ASSERT
    __ jmpb(patch_done);
    __ bind(fast_patch);
  }
#else
    __ jmp(patch_done);
    __ bind(fast_patch);
  }
  Label okay;
  __ load_unsigned_byte(scratch, at_bcp(0));
  __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
  __ jcc(Assembler::equal, okay);
  __ cmpl(scratch, bc);
  __ jcc(Assembler::equal, okay);
  __ stop("patching the wrong bytecode");
  __ bind(okay);
#endif
  // patch bytecode
  __ movb(at_bcp(0), bc);
  __ bind(patch_done);
}
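
// Added commentary (not in the original changeset): patch_bytecode
// overwrites the opcode byte at bcp with the "fast" variant, so the *next*
// execution of this bytecode dispatches straight to the specialized
// template, e.g.
//   iload 5  -->  fast_iload 5   (same operand bytes, new opcode)
// The assert-only block above sanity-checks that the byte being patched is
// still either the original Java code or the fast code.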


// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float one = 1.0f, two = 2.0f;
  switch (value) {
  case 0:
    __ xorps(xmm0, xmm0);
    break;
  case 1:
    __ movflt(xmm0, ExternalAddress((address) &one));
    break;
  case 2:
    __ movflt(xmm0, ExternalAddress((address) &two));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double one = 1.0;
  switch (value) {
  case 0:
    __ xorpd(xmm0, xmm0);
    break;
  case 1:
    __ movdbl(xmm0, ExternalAddress((address) &one));
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_word(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}
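
// Added commentary (not in the original changeset), a worked example of
// the sipush trick: the two operand bytes are big-endian in the bytecode
// stream, e.g. 0xFF 0xFE for the value -2. The little-endian 16-bit load
// yields rax = 0x0000FEFF; bswapl gives 0xFFFE0000; the arithmetic shift
// right by 16 both repositions the value and sign-extends it, producing
// 0xFFFFFFFE == -2.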

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notFloat, notClass, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));

  // unresolved string - get the resolved string
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedString);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);
  __ movl(c_rarg1, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
  __ push_ptr(rax);
  __ verify_oop(rax);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);
  // ftos
  __ movflt(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_f();
  __ jmp(Done);

  __ bind(notFloat);
#ifdef ASSERT
  {
    Label L;
    __ cmpl(rdx, JVM_CONSTANT_Integer);
    __ jcc(Assembler::equal, L);
    __ cmpl(rdx, JVM_CONSTANT_String);
    __ jcc(Assembler::equal, L);
    __ stop("unexpected tag type in ldc");
    __ bind(L);
  }
#endif
  // atos and itos
  Label isOop;
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jcc(Assembler::notEqual, isOop);
  __ movl(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_i(rax);
  __ jmp(Done);

  __ bind(isOop);
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_ptr(rax);

  if (VerifyOops) {
    __ verify_oop(rax);
  }

  __ bind(Done);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type
  __ cmpb(Address(rax, rbx, Address::times_1, tags_offset),
          JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, Long);
  // dtos
  __ movdbl(xmm0, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_d();
  __ jmpb(Done);

  __ bind(Long);
  // ltos
  __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
  __ push_l();

  __ bind(Done);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negq(reg);
  if (TaggedStackInterpreter) __ shlq(reg, 1);  // index = index*2
}
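
// Added commentary (not in the original changeset): higher-numbered locals
// sit at lower addresses than r14 because the frame grows downward, so the
// local index is negated here before iaddress(Register) scales it by
// times_8. With the tagged-stack interpreter each slot is a (tag, value)
// pair, i.e. two words, hence the extra doubling of the index.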

void TemplateTable::iload() {
  transition(vtos, itos);
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);
    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}
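
// Added commentary (not in the original changeset), a worked example of
// the pair rewriting above for the sequence "iload 4; iload 5; iadd":
//   1st pass: at "iload 4" the next opcode is _iload      -> no rewrite yet;
//             at "iload 5" the next opcode is _iadd       -> becomes _fast_iload.
//   2nd pass: at "iload 4" the next opcode is _fast_iload -> becomes _fast_iload2,
//             which loads both locals in one template (see fast_iload2 below).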

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movq(rax, laddress(rbx));
  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ movflt(xmm0, faddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ movdbl(xmm0, daddress(rbx));
  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movq(rax, aaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagReference, rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ movl(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negq(reg);
  if (TaggedStackInterpreter) __ shlq(reg, 1);  // index = index*2
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movq(rax, laddress(rbx));
  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ movflt(xmm0, faddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ movdbl(xmm0, daddress(rbx));
  debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movq(rax, aaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagReference, rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movslq(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into ebx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
  __ jump_cc(Assembler::aboveEqual,
             ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
}
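
// Added commentary (not in the original changeset): the bounds check relies
// on an unsigned comparison. jump_cc(aboveEqual) fires both when
// index >= length and when the index is negative, since a negative int
// reinterpreted as unsigned is larger than any legal array length -- one
// compare-and-branch covers both failure modes.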

void TemplateTable::iaload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ movl(rax, Address(rdx, rax,
                       Address::times_4,
                       arrayOopDesc::base_offset_in_bytes(T_INT)));
}

void TemplateTable::laload() {
  transition(itos, ltos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ movq(rax, Address(rdx, rbx,
                       Address::times_8,
                       arrayOopDesc::base_offset_in_bytes(T_LONG)));
}

void TemplateTable::faload() {
  transition(itos, ftos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ movflt(xmm0, Address(rdx, rax,
                          Address::times_4,
                          arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
}

void TemplateTable::daload() {
  transition(itos, dtos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ movdbl(xmm0, Address(rdx, rax,
                          Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
}

void TemplateTable::aaload() {
  transition(itos, atos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ load_heap_oop(rax, Address(rdx, rax,
                                UseCompressedOops ? Address::times_4 : Address::times_8,
                                arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}

void TemplateTable::baload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ load_signed_byte(rax,
                      Address(rdx, rax,
                              Address::times_1,
                              arrayOopDesc::base_offset_in_bytes(T_BYTE)));
}

void TemplateTable::caload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ load_unsigned_word(rax,
                        Address(rdx, rax,
                                Address::times_2,
                                arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  debug_only(__ verify_local_tag(frame::TagValue, rbx));

  // eax: index
  // rdx: array
  __ pop_ptr(rdx);
  index_check(rdx, rax);  // kills rbx
  __ load_unsigned_word(rax,
                        Address(rdx, rax,
                                Address::times_2,
                                arrayOopDesc::base_offset_in_bytes(T_CHAR)));
}

void TemplateTable::saload() {
  transition(itos, itos);
  __ pop_ptr(rdx);
  // eax: index
  // rdx: array
  index_check(rdx, rax);  // kills rbx
  __ load_signed_word(rax,
                      Address(rdx, rax,
                              Address::times_2,
                              arrayOopDesc::base_offset_in_bytes(T_SHORT)));
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
  debug_only(__ verify_local_tag(frame::TagValue, n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movq(rax, laddress(n));
  debug_only(__ verify_local_tag(frame::TagCategory2, n));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ movflt(xmm0, faddress(n));
  debug_only(__ verify_local_tag(frame::TagValue, n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ movdbl(xmm0, daddress(n));
  debug_only(__ verify_local_tag(frame::TagCategory2, n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movq(rax, aaddress(n));
  debug_only(__ verify_local_tag(frame::TagReference, n));
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite
  if (RewriteFrequentPairs) {
    Label rewrite, done;
    const Register bc = c_rarg3;
    assert(rbx != bc, "register damaged");
    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) ==
           Bytecodes::_aload_0,
           "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  } else {
    aload(0);
  }
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
  __ tag_local(frame::TagValue, rbx);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movq(laddress(rbx), rax);
  __ tag_local(frame::TagCategory2, rbx);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ movflt(faddress(rbx), xmm0);
  __ tag_local(frame::TagValue, rbx);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ movdbl(daddress(rbx), xmm0);
  __ tag_local(frame::TagCategory2, rbx);
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax, rdx);    // will need to pop tag too
  locals_index(rbx);
  __ movq(aaddress(rbx), rax);
  __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
  __ tag_local(frame::TagValue, rbx);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(rbx);
  __ movq(laddress(rbx), rax);
  __ tag_local(frame::TagCategory2, rbx);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
  __ tag_local(frame::TagValue, rbx);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
  __ tag_local(frame::TagCategory2, rbx);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax, rdx);    // will need to pop tag too
  locals_index_wide(rbx);
  __ movq(aaddress(rbx), rax);
  __ tag_local(rdx, rbx);  // store tag from stack, might be returnAddr
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movl(Address(rdx, rbx,
                  Address::times_4,
                  arrayOopDesc::base_offset_in_bytes(T_INT)),
          rax);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // rax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movq(Address(rdx, rbx,
                  Address::times_8,
                  arrayOopDesc::base_offset_in_bytes(T_LONG)),
          rax);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movflt(Address(rdx, rbx,
                    Address::times_4,
                    arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
            xmm0);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // xmm0: value
  // ebx:  index
  // rdx:  array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movdbl(Address(rdx, rbx,
                    Address::times_8,
                    arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
            xmm0);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movq(rax, at_tos());     // value
  __ movl(rcx, at_tos_p1());  // index
  __ movq(rdx, at_tos_p2());  // array

  Address element_address(rdx, rcx,
                          UseCompressedOops ? Address::times_4 : Address::times_8,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check(rdx, rcx);      // kills rbx
  // do array store check - check for NULL value first
  __ testq(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move superklass into rax
  __ load_klass(rax, rdx);
  __ movq(rax, Address(rax,
                       sizeof(oopDesc) +
                       objArrayKlass::element_klass_offset_in_bytes()));
  // Compress array + index*oopSize + 12 into a single register.  Frees rcx.
  __ leaq(rdx, element_address);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movq(rax, at_tos());

  // Now store using the appropriate barrier
  do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, _bs->kind(), true);

  // Pop stack arguments
  __ bind(done);
  __ addq(rsp, 3 * Interpreter::stackElementSize());
}
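
// Added commentary (not in the original changeset): the null path skips the
// subtype check entirely -- storing NULL can never violate the array's
// element type -- so it goes straight to the barrier with val == noreg,
// while profile_null_seen records the null for the profiler. The non-null
// path reloads the value from at_tos() because rax was repurposed to hold
// the superklass for the check.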

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movb(Address(rdx, rbx,
                  Address::times_1,
                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
          rax);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  __ pop_ptr(rdx);
  // eax: value
  // ebx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in ebx
  __ movw(Address(rdx, rbx,
                  Address::times_2,
                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
          rax);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
  __ tag_local(frame::TagValue, n);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movq(laddress(n), rax);
  __ tag_local(frame::TagCategory2, n);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ movflt(faddress(n), xmm0);
  __ tag_local(frame::TagValue, n);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ movdbl(daddress(n), xmm0);
  __ tag_local(frame::TagCategory2, n);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax, rdx);
  __ movq(aaddress(n), rax);
  __ tag_local(rdx, n);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addq(rsp, Interpreter::stackElementSize());
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addq(rsp, 2 * Interpreter::stackElementSize());
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr_and_tag(0, rax, rdx);
  __ push_ptr(rax, rdx);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(0, rax, rdx);   // load b
  __ load_ptr_and_tag(1, rcx, rbx);   // load a
  __ store_ptr_and_tag(1, rax, rdx);  // store b
  __ store_ptr_and_tag(0, rcx, rbx);  // store a
  __ push_ptr(rax, rdx);              // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr_and_tag(0, rax, rdx);   // load c
  __ load_ptr_and_tag(2, rcx, rbx);   // load a
  __ store_ptr_and_tag(2, rax, rdx);  // store c in a
  __ push_ptr(rax, rdx);              // push c
  // stack: ..., c, b, c, c
  __ load_ptr_and_tag(2, rax, rdx);   // load b
  __ store_ptr_and_tag(2, rcx, rbx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr_and_tag(1, rax, rdx);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, rax, rdx);  // load a
  __ push_ptr(rax, rdx);             // push a
  __ load_ptr_and_tag(1, rax, rdx);  // load b
  __ push_ptr(rax, rdx);             // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr_and_tag(0, rcx, rbx);   // load c
  __ load_ptr_and_tag(1, rax, rdx);   // load b
  __ push_ptr(rax, rdx);              // push b
  __ push_ptr(rcx, rbx);              // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr_and_tag(3, rcx, rbx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr_and_tag(4, rcx, rbx);   // load a
  __ store_ptr_and_tag(2, rcx, rbx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr_and_tag(4, rax, rdx);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr_and_tag(0, rcx, rbx);   // load d
  __ load_ptr_and_tag(1, rax, rdx);   // load c
  __ push_ptr(rax, rdx);              // push c
  __ push_ptr(rcx, rbx);              // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr_and_tag(4, rax, rdx);   // load b
  __ store_ptr_and_tag(2, rax, rdx);  // store b in d
  __ store_ptr_and_tag(4, rcx, rbx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr_and_tag(5, rcx, rbx);   // load a
  __ load_ptr_and_tag(3, rax, rdx);   // load c
  __ store_ptr_and_tag(3, rcx, rbx);  // store a in c
  __ store_ptr_and_tag(5, rax, rdx);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, rcx, rbx);   // load a
  __ load_ptr_and_tag(0, rax, rdx);   // load b
  __ store_ptr_and_tag(0, rcx, rbx);  // store a in b
  __ store_ptr_and_tag(1, rax, rdx);  // store b in a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  switch (op) {
  case add  :                    __ pop_l(rdx); __ addq (rax, rdx); break;
  case sub  : __ movq(rdx, rax); __ pop_l(rax); __ subq (rax, rdx); break;
  case _and :                    __ pop_l(rdx); __ andq (rax, rdx); break;
  case _or  :                    __ pop_l(rdx); __ orq  (rax, rdx); break;
  case _xor :                    __ pop_l(rdx); __ xorq (rax, rdx); break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor eax and ecx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}
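
// Added commentary (not in the original changeset): corrected_idivl exists
// because x86's idiv raises #DE not only on divide-by-zero but also on the
// one overflowing case, min_int / -1. The "corrected" helper special-cases
// that pair to yield quotient min_int and remainder 0, as the JVM spec
// requires; irem then copies the remainder from rdx into rax.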

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(rdx);
  __ imulq(rax, rdx);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
  __ movq(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx);  // kills rbx
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
  __ movq(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  // they are not equal, one could do a normal division (no correction
  // needed), which may speed up this implementation for the common case.
  // (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx);  // kills rbx
  __ movq(rax, rdx);
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shlq(rax);
}

void TemplateTable::lshr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ sarq(rax);
}

void TemplateTable::lushr() {
  transition(itos, ltos);
  __ movl(rcx, rax);  // get shift count
  __ pop_l(rax);      // get shift value
  __ shrq(rax);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
  case add:
    __ addss(xmm0, at_rsp());
    __ addq(rsp, Interpreter::stackElementSize());
    break;
  case sub:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ subss(xmm0, xmm1);
    break;
  case mul:
    __ mulss(xmm0, at_rsp());
    __ addq(rsp, Interpreter::stackElementSize());
    break;
  case div:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ divss(xmm0, xmm1);
    break;
  case rem:
    __ movflt(xmm1, xmm0);
    __ pop_f(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
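
// Added commentary (not in the original changeset): the commutative ops
// (add, mul) use the value still on the expression stack as a memory
// operand and pop it by just adjusting rsp, while the non-commutative ops
// (sub, div, rem) must first swap: the TOS-cached value in xmm0 is the
// *second* operand, so it moves to xmm1 and the first operand is popped
// into xmm0. frem/drem are delegated to SharedRuntime because no single
// SSE instruction implements Java's IEEE remainder semantics.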

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
  case add:
    __ addsd(xmm0, at_rsp());
    __ addq(rsp, 2 * Interpreter::stackElementSize());
    break;
  case sub:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ subsd(xmm0, xmm1);
    break;
  case mul:
    __ mulsd(xmm0, at_rsp());
    __ addq(rsp, 2 * Interpreter::stackElementSize());
    break;
  case div:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ divsd(xmm0, xmm1);
    break;
  case rem:
    __ movdbl(xmm1, xmm0);
    __ pop_d(xmm0);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  __ negq(rax);
}

// Note: 'double' and 'long long' have 32-bits alignment on x86.
static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
  // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
  // of 128-bits operands for SSE instructions.
  jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
  // Store the value to a 128-bits operand.
  operand[0] = lo;
  operand[1] = hi;
  return operand;
}
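
// Added commentary (not in the original changeset): the pools below reserve
// 2*2 jlongs (32 bytes) although each mask only needs 16. Passing &pool[1]
// to double_quadword guarantees that aligning down to a 16-byte boundary
// still lands inside the buffer: if the pool starts 8 bytes past an
// alignment boundary, the masked address is &pool[1] itself; if it starts
// on one, the masked address is &pool[0]. Either way the 16-byte mask fits,
// which xorps/xorpd require for their memory operand.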
1334 | |
1335 // Buffer for 128-bits masks used by SSE instructions. | |
1336 static jlong float_signflip_pool[2*2]; | |
1337 static jlong double_signflip_pool[2*2]; | |
1338 | |
1339 void TemplateTable::fneg() { | |
1340 transition(ftos, ftos); | |
1341 static jlong *float_signflip = double_quadword(&float_signflip_pool[1], 0x8000000080000000, 0x8000000080000000); | |
1342 __ xorps(xmm0, ExternalAddress((address) float_signflip)); | |
1343 } | |
1344 | |
1345 void TemplateTable::dneg() { | |
1346 transition(dtos, dtos); | |
1347 static jlong *double_signflip = double_quadword(&double_signflip_pool[1], 0x8000000000000000, 0x8000000000000000); | |
1348 __ xorpd(xmm0, ExternalAddress((address) double_signflip)); | |
1349 } | |
1350 | |
1351 void TemplateTable::iinc() { | |
1352 transition(vtos, vtos); | |
1353 __ load_signed_byte(rdx, at_bcp(2)); // get constant | |
1354 locals_index(rbx); | |
1355 __ addl(iaddress(rbx), rdx); | |
1356 } | |
1357 | |
1358 void TemplateTable::wide_iinc() { | |
1359 transition(vtos, vtos); | |
1360 __ movl(rdx, at_bcp(4)); // get constant | |
1361 locals_index_wide(rbx); | |
1362 __ bswapl(rdx); // swap bytes & sign-extend constant | |
1363 __ sarl(rdx, 16); | |
1364 __ addl(iaddress(rbx), rdx); | |
1365 // Note: should probably use only one movl to get both | |
1366 // the index and the constant -> fix this | |
1367 } | |
1368 | |
1369 void TemplateTable::convert() { | |
1370 // Checking | |
1371 #ifdef ASSERT | |
1372 { | |
1373 TosState tos_in = ilgl; | |
1374 TosState tos_out = ilgl; | |
1375 switch (bytecode()) { | |
1376 case Bytecodes::_i2l: // fall through | |
1377 case Bytecodes::_i2f: // fall through | |
1378 case Bytecodes::_i2d: // fall through | |
1379 case Bytecodes::_i2b: // fall through | |
1380 case Bytecodes::_i2c: // fall through | |
1381 case Bytecodes::_i2s: tos_in = itos; break; | |
1382 case Bytecodes::_l2i: // fall through | |
1383 case Bytecodes::_l2f: // fall through | |
1384 case Bytecodes::_l2d: tos_in = ltos; break; | |
1385 case Bytecodes::_f2i: // fall through | |
1386 case Bytecodes::_f2l: // fall through | |
1387 case Bytecodes::_f2d: tos_in = ftos; break; | |
1388 case Bytecodes::_d2i: // fall through | |
1389 case Bytecodes::_d2l: // fall through | |
1390 case Bytecodes::_d2f: tos_in = dtos; break; | |
1391 default : ShouldNotReachHere(); | |
1392 } | |
1393 switch (bytecode()) { | |
1394 case Bytecodes::_l2i: // fall through | |
1395 case Bytecodes::_f2i: // fall through | |
1396 case Bytecodes::_d2i: // fall through | |
1397 case Bytecodes::_i2b: // fall through | |
1398 case Bytecodes::_i2c: // fall through | |
1399 case Bytecodes::_i2s: tos_out = itos; break; | |
1400 case Bytecodes::_i2l: // fall through | |
1401 case Bytecodes::_f2l: // fall through | |
1402 case Bytecodes::_d2l: tos_out = ltos; break; | |
1403 case Bytecodes::_i2f: // fall through | |
1404 case Bytecodes::_l2f: // fall through | |
1405 case Bytecodes::_d2f: tos_out = ftos; break; | |
1406 case Bytecodes::_i2d: // fall through | |
1407 case Bytecodes::_l2d: // fall through | |
1408 case Bytecodes::_f2d: tos_out = dtos; break; | |
1409 default : ShouldNotReachHere(); | |
1410 } | |
1411 transition(tos_in, tos_out); | |
1412 } | |
1413 #endif // ASSERT | |
1414 | |
1415 static const int64_t is_nan = 0x8000000000000000L; | |
1416 | |
1417 // Conversion | |
1418 switch (bytecode()) { | |
1419 case Bytecodes::_i2l: | |
1420 __ movslq(rax, rax); | |
1421 break; | |
1422 case Bytecodes::_i2f: | |
1423 __ cvtsi2ssl(xmm0, rax); | |
1424 break; | |
1425 case Bytecodes::_i2d: | |
1426 __ cvtsi2sdl(xmm0, rax); | |
1427 break; | |
1428 case Bytecodes::_i2b: | |
1429 __ movsbl(rax, rax); | |
1430 break; | |
1431 case Bytecodes::_i2c: | |
1432 __ movzwl(rax, rax); | |
1433 break; | |
1434 case Bytecodes::_i2s: | |
1435 __ movswl(rax, rax); | |
1436 break; | |
1437 case Bytecodes::_l2i: | |
1438 __ movl(rax, rax); | |
1439 break; | |
1440 case Bytecodes::_l2f: | |
1441 __ cvtsi2ssq(xmm0, rax); | |
1442 break; | |
1443 case Bytecodes::_l2d: | |
1444 __ cvtsi2sdq(xmm0, rax); | |
1445 break; | |
1446 case Bytecodes::_f2i: | |
1447 { | |
1448 Label L; | |
1449 __ cvttss2sil(rax, xmm0); | |
1450 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? | |
1451 __ jcc(Assembler::notEqual, L); | |
1452 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1); | |
1453 __ bind(L); | |
1454 } | |
1455 break; | |
1456 case Bytecodes::_f2l: | |
1457 { | |
1458 Label L; | |
1459 __ cvttss2siq(rax, xmm0); | |
1460 // NaN or overflow/underflow? | |
1461 __ cmp64(rax, ExternalAddress((address) &is_nan)); | |
1462 __ jcc(Assembler::notEqual, L); | |
1463 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1); | |
1464 __ bind(L); | |
1465 } | |
1466 break; | |
1467 case Bytecodes::_f2d: | |
1468 __ cvtss2sd(xmm0, xmm0); | |
1469 break; | |
1470 case Bytecodes::_d2i: | |
1471 { | |
1472 Label L; | |
1473 __ cvttsd2sil(rax, xmm0); | |
1474 __ cmpl(rax, 0x80000000); // NaN or overflow/underflow? | |
1475 __ jcc(Assembler::notEqual, L); | |
1476 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1); | |
1477 __ bind(L); | |
1478 } | |
1479 break; | |
1480 case Bytecodes::_d2l: | |
1481 { | |
1482 Label L; | |
1483 __ cvttsd2siq(rax, xmm0); | |
1484 // NaN or overflow/underflow? | |
1485 __ cmp64(rax, ExternalAddress((address) &is_nan)); | |
1486 __ jcc(Assembler::notEqual, L); | |
1487 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1); | |
1488 __ bind(L); | |
1489 } | |
1490 break; | |
1491 case Bytecodes::_d2f: | |
1492 __ cvtsd2ss(xmm0, xmm0); | |
1493 break; | |
1494 default: | |
1495 ShouldNotReachHere(); | |
1496 } | |
1497 } | |
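// cvttss2si/cvttsd2si return 0x80000000 (0x8000000000000000 for the quadword
// forms) for NaN and out-of-range inputs, which is what routes those cases
// into the SharedRuntime slow paths above. A sketch of the Java-mandated
// result (JLS 5.1.3) the slow path must produce, not the VM's actual code:
//
//   jint f2i_semantics(jfloat x) {
//     if (x != x)                 return 0;         // NaN converts to 0
//     if (x >= (jfloat) max_jint) return max_jint;  // saturate on overflow
//     if (x <= (jfloat) min_jint) return min_jint;  // saturate on underflow
//     return (jint) x;                              // else truncate toward zero
//   }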
1498 | |
1499 void TemplateTable::lcmp() { | |
1500 transition(ltos, itos); | |
1501 Label done; | |
1502 __ pop_l(rdx); | |
1503 __ cmpq(rdx, rax); | |
1504 __ movl(rax, -1); | |
1505 __ jccb(Assembler::less, done); | |
1506 __ setb(Assembler::notEqual, rax); | |
1507 __ movzbl(rax, rax); | |
1508 __ bind(done); | |
1509 } | |
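// Semantics implemented above, with value1 popped into rdx and value2 on
// tos in rax:
//
//   int lcmp(jlong v1, jlong v2) {
//     return v1 < v2 ? -1 : (v1 == v2 ? 0 : 1);
//   }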
1510 | |
1511 void TemplateTable::float_cmp(bool is_float, int unordered_result) { | |
1512 Label done; | |
1513 if (is_float) { | |
1514 // XXX get rid of pop here, use ... reg, mem32 | |
1515 __ pop_f(xmm1); | |
1516 __ ucomiss(xmm1, xmm0); | |
1517 } else { | |
1518 // XXX get rid of pop here, use ... reg, mem64 | |
1519 __ pop_d(xmm1); | |
1520 __ ucomisd(xmm1, xmm0); | |
1521 } | |
1522 if (unordered_result < 0) { | |
1523 __ movl(rax, -1); | |
1524 __ jccb(Assembler::parity, done); | |
1525 __ jccb(Assembler::below, done); | |
1526 __ setb(Assembler::notEqual, rdx); | |
1527 __ movzbl(rax, rdx); | |
1528 } else { | |
1529 __ movl(rax, 1); | |
1530 __ jccb(Assembler::parity, done); | |
1531 __ jccb(Assembler::above, done); | |
1532 __ movl(rax, 0); | |
1533 __ jccb(Assembler::equal, done); | |
1534 __ decrementl(rax); | |
1535 } | |
1536 __ bind(done); | |
1537 } | |
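// unordered_result selects the fcmpl/dcmpl (-1) or fcmpg/dcmpg (+1) flavor
// of the comparison when an operand is NaN. A sketch of the bytecode
// semantics being implemented:
//
//   int fcmp(jfloat v1, jfloat v2, int unordered_result) {
//     if (v1 != v1 || v2 != v2) return unordered_result;  // unordered (NaN)
//     return v1 < v2 ? -1 : (v1 == v2 ? 0 : 1);
//   }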
1538 | |
1539 void TemplateTable::branch(bool is_jsr, bool is_wide) { | |
1540 __ get_method(rcx); // rcx holds method | |
1541 __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx | |
1542 // holds bumped taken count | |
1543 | |
1544 const ByteSize be_offset = methodOopDesc::backedge_counter_offset() + | |
1545 InvocationCounter::counter_offset(); | |
1546 const ByteSize inv_offset = methodOopDesc::invocation_counter_offset() + | |
1547 InvocationCounter::counter_offset(); | |
1548 const int method_offset = frame::interpreter_frame_method_offset * wordSize; | |
1549 | |
1550 // Load up edx with the branch displacement | |
1551 __ movl(rdx, at_bcp(1)); | |
1552 __ bswapl(rdx); | |
1553 | |
1554 if (!is_wide) { | |
1555 __ sarl(rdx, 16); | |
1556 } | |
1557 __ movslq(rdx, rdx); | |
1558 | |
1559 // Handle all the JSR stuff here, then exit. | |
1560 // It's much shorter and cleaner than intermingling with the non-JSR | |
1561 // normal-branch stuff occurring below. | |
1562 if (is_jsr) { | |
1563 // Pre-load the next target bytecode into rbx | |
1564 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0)); | |
1565 | |
1566 // compute return address as bci in rax | |
1567 __ leaq(rax, at_bcp((is_wide ? 5 : 3) - | |
1568 in_bytes(constMethodOopDesc::codes_offset()))); | |
1569 __ subq(rax, Address(rcx, methodOopDesc::const_offset())); | |
1570 // Adjust the bcp in r13 by the displacement in rdx | |
1571 __ addq(r13, rdx); | |
1572 // jsr returns atos, but the value pushed is a return bci, not an oop | |
1573 __ push_i(rax); | |
1574 __ dispatch_only(vtos); | |
1575 return; | |
1576 } | |
1577 | |
1578 // Normal (non-jsr) branch handling | |
1579 | |
1580 // Adjust the bcp in r13 by the displacement in rdx | |
1581 __ addq(r13, rdx); | |
1582 | |
1583 assert(UseLoopCounter || !UseOnStackReplacement, | |
1584 "on-stack-replacement requires loop counters"); | |
1585 Label backedge_counter_overflow; | |
1586 Label profile_method; | |
1587 Label dispatch; | |
1588 if (UseLoopCounter) { | |
1589 // increment backedge counter for backward branches | |
1590 // rax: MDO | |
1591 // ebx: MDO bumped taken-count | |
1592 // rcx: method | |
1593 // rdx: target offset | |
1594 // r13: target bcp | |
1595 // r14: locals pointer | |
1596 __ testl(rdx, rdx); // check if forward or backward branch | |
1597 __ jcc(Assembler::positive, dispatch); // count only if backward branch | |
1598 | |
1599 // increment counter | |
1600 __ movl(rax, Address(rcx, be_offset)); // load backedge counter | |
1601 __ incrementl(rax, InvocationCounter::count_increment); // increment | |
1602 // counter | |
1603 __ movl(Address(rcx, be_offset), rax); // store counter | |
1604 | |
1605 __ movl(rax, Address(rcx, inv_offset)); // load invocation counter | |
1606 __ andl(rax, InvocationCounter::count_mask_value); // and the status bits | |
1607 __ addl(rax, Address(rcx, be_offset)); // add both counters | |
1608 | |
1609 if (ProfileInterpreter) { | |
1610 // Test to see if we should create a method data oop | |
1611 __ cmp32(rax, | |
1612 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit)); | |
1613 __ jcc(Assembler::less, dispatch); | |
1614 | |
1615 // if no method data exists, go to profile method | |
1616 __ test_method_data_pointer(rax, profile_method); | |
1617 | |
1618 if (UseOnStackReplacement) { | |
1619 // check for overflow against ebx which is the MDO taken count | |
1620 __ cmp32(rbx, | |
1621 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); | |
1622 __ jcc(Assembler::below, dispatch); | |
1623 | |
1624 // When ProfileInterpreter is on, the backedge_count comes | |
1625 // from the methodDataOop, whose value does not get reset on | |
1626 // the call to frequency_counter_overflow(). To avoid | |
1627 // excessive calls to the overflow routine while the method is | |
1628 // being compiled, add a second test to make sure the overflow | |
1629 // function is called only once every overflow_frequency. | |
1630 const int overflow_frequency = 1024; | |
1631 __ andl(rbx, overflow_frequency - 1); | |
1632 __ jcc(Assembler::zero, backedge_counter_overflow); | |
1633 | |
1634 } | |
1635 } else { | |
1636 if (UseOnStackReplacement) { | |
1637 // check for overflow against eax, which is the sum of the | |
1638 // counters | |
1639 __ cmp32(rax, | |
1640 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit)); | |
1641 __ jcc(Assembler::aboveEqual, backedge_counter_overflow); | |
1642 | |
1643 } | |
1644 } | |
1645 __ bind(dispatch); | |
1646 } | |
1647 | |
1648 // Pre-load the next target bytecode into rbx | |
1649 __ load_unsigned_byte(rbx, Address(r13, 0)); | |
1650 | |
1651 // continue with the bytecode @ target | |
1652 // eax: return bci for jsr's, unused otherwise | |
1653 // ebx: target bytecode | |
1654 // r13: target bcp | |
1655 __ dispatch_only(vtos); | |
1656 | |
1657 if (UseLoopCounter) { | |
1658 if (ProfileInterpreter) { | |
1659 // Out-of-line code to allocate method data oop. | |
1660 __ bind(profile_method); | |
1661 __ call_VM(noreg, | |
1662 CAST_FROM_FN_PTR(address, | |
1663 InterpreterRuntime::profile_method), r13); | |
1664 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode | |
1665 __ movq(rcx, Address(rbp, method_offset)); | |
1666 __ movq(rcx, Address(rcx, | |
1667 in_bytes(methodOopDesc::method_data_offset()))); | |
1668 __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), | |
1669 rcx); | |
1670 __ test_method_data_pointer(rcx, dispatch); | |
1671 // offset non-null mdp by MDO::data_offset() + IR::profile_method() | |
1672 __ addq(rcx, in_bytes(methodDataOopDesc::data_offset())); | |
1673 __ addq(rcx, rax); | |
1674 __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), | |
1675 rcx); | |
1676 __ jmp(dispatch); | |
1677 } | |
1678 | |
1679 if (UseOnStackReplacement) { | |
1680 // invocation counter overflow | |
1681 __ bind(backedge_counter_overflow); | |
1682 __ negq(rdx); | |
1683 __ addq(rdx, r13); // branch bcp | |
1684 // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp) | |
1685 __ call_VM(noreg, | |
1686 CAST_FROM_FN_PTR(address, | |
1687 InterpreterRuntime::frequency_counter_overflow), | |
1688 rdx); | |
1689 __ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode | |
1690 | |
1691 // rax: osr nmethod (osr ok) or NULL (osr not possible) | |
1692 // ebx: target bytecode | |
1693 // rdx: scratch | |
1694 // r14: locals pointer | |
1695 // r13: bcp | |
1696 __ testq(rax, rax); // test result | |
1697 __ jcc(Assembler::zero, dispatch); // no osr if null | |
1698 // nmethod may have been invalidated (VM may block upon call_VM return) | |
1699 __ movl(rcx, Address(rax, nmethod::entry_bci_offset())); | |
1700 __ cmpl(rcx, InvalidOSREntryBci); | |
1701 __ jcc(Assembler::equal, dispatch); | |
1702 | |
1703 // We have the address of an on stack replacement routine in eax | |
1704 // We need to prepare to execute the OSR method. First we must | |
1705 // migrate the locals and monitors off of the stack. | |
1706 | |
1707 __ movq(r13, rax); // save the nmethod | |
1708 | |
1709 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin)); | |
1710 | |
1711 // eax is OSR buffer, move it to expected parameter location | |
1712 __ movq(j_rarg0, rax); | |
1713 | |
2714 // We use j_rarg definitions here because parameter registers differ across | |
2715 // platforms and we are in the midst of the calling sequence to the OSR | |
2716 // nmethod, so we must not collide with its argument registers. These are NOT parameters. | |
1717 | |
1718 const Register retaddr = j_rarg2; | |
1719 const Register sender_sp = j_rarg1; | |
1720 | |
1721 // pop the interpreter frame | |
1722 __ movq(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp | |
1723 __ leave(); // remove frame anchor | |
1724 __ popq(retaddr); // get return address | |
1725 __ movq(rsp, sender_sp); // set sp to sender sp | |
1726 // Ensure compiled code always sees stack at proper alignment | |
1727 __ andq(rsp, -(StackAlignmentInBytes)); | |
1728 | |
1729 // unlike x86 we need no specialized return from compiled code | |
1730 // to the interpreter or the call stub. | |
1731 | |
1732 // push the return address | |
1733 __ pushq(retaddr); | |
1734 | |
1735 // and begin the OSR nmethod | |
1736 __ jmp(Address(r13, nmethod::osr_entry_point_offset())); | |
1737 } | |
1738 } | |
1739 } | |
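// The loop-counter and OSR logic above, compressed into pseudo-C++ (a
// rough sketch of the non-profiling control flow, not the VM's actual code):
//
//   if (branch_is_backward) {
//     ++backedge_counter;
//     if ((invocation_counter & count_mask) + backedge_counter
//         >= InterpreterBackwardBranchLimit) {
//       nmethod* osr = frequency_counter_overflow(branch_bcp);
//       if (osr != NULL && osr->entry_bci() != InvalidOSREntryBci) {
//         // migrate locals/monitors off the stack, pop the interpreter
//         // frame, and jump to the OSR entry point
//       }
//     }
//   }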
1740 | |
1741 | |
1742 void TemplateTable::if_0cmp(Condition cc) { | |
1743 transition(itos, vtos); | |
1744 // assume branch is more often taken than not (loops use backward branches) | |
1745 Label not_taken; | |
1746 __ testl(rax, rax); | |
1747 __ jcc(j_not(cc), not_taken); | |
1748 branch(false, false); | |
1749 __ bind(not_taken); | |
1750 __ profile_not_taken_branch(rax); | |
1751 } | |
1752 | |
1753 void TemplateTable::if_icmp(Condition cc) { | |
1754 transition(itos, vtos); | |
1755 // assume branch is more often taken than not (loops use backward branches) | |
1756 Label not_taken; | |
1757 __ pop_i(rdx); | |
1758 __ cmpl(rdx, rax); | |
1759 __ jcc(j_not(cc), not_taken); | |
1760 branch(false, false); | |
1761 __ bind(not_taken); | |
1762 __ profile_not_taken_branch(rax); | |
1763 } | |
1764 | |
1765 void TemplateTable::if_nullcmp(Condition cc) { | |
1766 transition(atos, vtos); | |
1767 // assume branch is more often taken than not (loops use backward branches) | |
1768 Label not_taken; | |
1769 __ testq(rax, rax); | |
1770 __ jcc(j_not(cc), not_taken); | |
1771 branch(false, false); | |
1772 __ bind(not_taken); | |
1773 __ profile_not_taken_branch(rax); | |
1774 } | |
1775 | |
1776 void TemplateTable::if_acmp(Condition cc) { | |
1777 transition(atos, vtos); | |
1778 // assume branch is more often taken than not (loops use backward branches) | |
1779 Label not_taken; | |
1780 __ pop_ptr(rdx); | |
1781 __ cmpq(rdx, rax); | |
1782 __ jcc(j_not(cc), not_taken); | |
1783 branch(false, false); | |
1784 __ bind(not_taken); | |
1785 __ profile_not_taken_branch(rax); | |
1786 } | |
1787 | |
1788 void TemplateTable::ret() { | |
1789 transition(vtos, vtos); | |
1790 locals_index(rbx); | |
1791 __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp | |
1792 __ profile_ret(rbx, rcx); | |
1793 __ get_method(rax); | |
1794 __ movq(r13, Address(rax, methodOopDesc::const_offset())); | |
1795 __ leaq(r13, Address(r13, rbx, Address::times_1, | |
1796 constMethodOopDesc::codes_offset())); | |
1797 __ dispatch_next(vtos); | |
1798 } | |
1799 | |
1800 void TemplateTable::wide_ret() { | |
1801 transition(vtos, vtos); | |
1802 locals_index_wide(rbx); | |
1803 __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp | |
1804 __ profile_ret(rbx, rcx); | |
1805 __ get_method(rax); | |
1806 __ movq(r13, Address(rax, methodOopDesc::const_offset())); | |
1807 __ leaq(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset())); | |
1808 __ dispatch_next(vtos); | |
1809 } | |
1810 | |
1811 void TemplateTable::tableswitch() { | |
1812 Label default_case, continue_execution; | |
1813 transition(itos, vtos); | |
1814 // align r13 | |
1815 __ leaq(rbx, at_bcp(BytesPerInt)); | |
1816 __ andq(rbx, -BytesPerInt); | |
1817 // load lo & hi | |
1818 __ movl(rcx, Address(rbx, BytesPerInt)); | |
1819 __ movl(rdx, Address(rbx, 2 * BytesPerInt)); | |
1820 __ bswapl(rcx); | |
1821 __ bswapl(rdx); | |
1822 // check against lo & hi | |
1823 __ cmpl(rax, rcx); | |
1824 __ jcc(Assembler::less, default_case); | |
1825 __ cmpl(rax, rdx); | |
1826 __ jcc(Assembler::greater, default_case); | |
1827 // lookup dispatch offset | |
1828 __ subl(rax, rcx); | |
1829 __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt)); | |
1830 __ profile_switch_case(rax, rbx, rcx); | |
1831 // continue execution | |
1832 __ bind(continue_execution); | |
1833 __ bswapl(rdx); | |
1834 __ movslq(rdx, rdx); | |
1835 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); | |
1836 __ addq(r13, rdx); | |
1837 __ dispatch_only(vtos); | |
1838 // handle default | |
1839 __ bind(default_case); | |
1840 __ profile_switch_default(rax); | |
1841 __ movl(rdx, Address(rbx, 0)); | |
1842 __ jmp(continue_execution); | |
1843 } | |
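// Operand layout decoded above: after bcp is padded to a 4-byte boundary,
// the tableswitch operands are the big-endian 4-byte ints
//
//   default_offset, lo, hi, jump_offsets[hi - lo + 1]
//
// so a key k in [lo, hi] takes the branch offset jump_offsets[k - lo], and
// anything outside the range falls back to default_offset.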
1844 | |
1845 void TemplateTable::lookupswitch() { | |
1846 transition(itos, itos); | |
1847 __ stop("lookupswitch bytecode should have been rewritten"); | |
1848 } | |
1849 | |
1850 void TemplateTable::fast_linearswitch() { | |
1851 transition(itos, vtos); | |
1852 Label loop_entry, loop, found, continue_execution; | |
1853 // bswap rax so we can avoid bswapping the table entries | |
1854 __ bswapl(rax); | |
1855 // align r13 | |
1856 __ leaq(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of | |
1857 // this instruction (change offsets | |
1858 // below) | |
1859 __ andq(rbx, -BytesPerInt); | |
1860 // set counter | |
1861 __ movl(rcx, Address(rbx, BytesPerInt)); | |
1862 __ bswapl(rcx); | |
1863 __ jmpb(loop_entry); | |
1864 // table search | |
1865 __ bind(loop); | |
1866 __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt)); | |
1867 __ jcc(Assembler::equal, found); | |
1868 __ bind(loop_entry); | |
1869 __ decrementl(rcx); | |
1870 __ jcc(Assembler::greaterEqual, loop); | |
1871 // default case | |
1872 __ profile_switch_default(rax); | |
1873 __ movl(rdx, Address(rbx, 0)); | |
1874 __ jmp(continue_execution); | |
1875 // entry found -> get offset | |
1876 __ bind(found); | |
1877 __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt)); | |
1878 __ profile_switch_case(rcx, rax, rbx); | |
1879 // continue execution | |
1880 __ bind(continue_execution); | |
1881 __ bswapl(rdx); | |
1882 __ movslq(rdx, rdx); | |
1883 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1)); | |
1884 __ addq(r13, rdx); | |
1885 __ dispatch_only(vtos); | |
1886 } | |
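// Operand layout for lookupswitch after 4-byte alignment (big-endian ints):
//
//   default_offset, npairs, { match, offset } [npairs]
//
// Each pair is 8 bytes, hence the times_8 addressing. Bswapping the key
// once up front lets the loop compare directly against the stored
// big-endian match values instead of bswapping every table entry.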
1887 | |
1888 void TemplateTable::fast_binaryswitch() { | |
1889 transition(itos, vtos); | |
1890 // Implementation using the following core algorithm: | |
1891 // | |
1892 // int binary_search(int key, LookupswitchPair* array, int n) { | |
1893 // // Binary search according to "Methodik des Programmierens" by | |
1894 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985. | |
1895 // int i = 0; | |
1896 // int j = n; | |
1897 // while (i+1 < j) { | |
1898 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q) | |
1899 // // with Q: for all i: 0 <= i < n: key < a[i] | |
1900 // // where a stands for the array and assuming that the (nonexistent) | |
1901 // // element a[n] is infinitely big. | |
1902 // int h = (i + j) >> 1; | |
1903 // // i < h < j | |
1904 // if (key < array[h].fast_match()) { | |
1905 // j = h; | |
1906 // } else { | |
1907 // i = h; | |
1908 // } | |
1909 // } | |
1910 // // R: a[i] <= key < a[i+1] or Q | |
1911 // // (i.e., if key is within array, i is the correct index) | |
1912 // return i; | |
1913 // } | |
1914 | |
1915 // Register allocation | |
1916 const Register key = rax; // already set (tosca) | |
1917 const Register array = rbx; | |
1918 const Register i = rcx; | |
1919 const Register j = rdx; | |
1920 const Register h = rdi; | |
1921 const Register temp = rsi; | |
1922 | |
1923 // Find array start | |
1924 __ leaq(array, at_bcp(3 * BytesPerInt)); // btw: should be able to | |
1925 // get rid of this | |
1926 // instruction (change | |
1927 // offsets below) | |
1928 __ andq(array, -BytesPerInt); | |
1929 | |
1930 // Initialize i & j | |
1931 __ xorl(i, i); // i = 0; | |
1932 __ movl(j, Address(array, -BytesPerInt)); // j = length(array); | |
1933 | |
1934 // Convert j into native byteordering | |
1935 __ bswapl(j); | |
1936 | |
1937 // And start | |
1938 Label entry; | |
1939 __ jmp(entry); | |
1940 | |
1941 // binary search loop | |
1942 { | |
1943 Label loop; | |
1944 __ bind(loop); | |
1945 // int h = (i + j) >> 1; | |
1946 __ leal(h, Address(i, j, Address::times_1)); // h = i + j; | |
1947 __ sarl(h, 1); // h = (i + j) >> 1; | |
1948 // if (key < array[h].fast_match()) { | |
1949 // j = h; | |
1950 // } else { | |
1951 // i = h; | |
1952 // } | |
1953 // Convert array[h].match to native byte-ordering before compare | |
1954 __ movl(temp, Address(array, h, Address::times_8)); | |
1955 __ bswapl(temp); | |
1956 __ cmpl(key, temp); | |
1957 // j = h if (key < array[h].fast_match()) | |
1958 __ cmovl(Assembler::less, j, h); | |
1959 // i = h if (key >= array[h].fast_match()) | |
1960 __ cmovl(Assembler::greaterEqual, i, h); | |
1961 // while (i+1 < j) | |
1962 __ bind(entry); | |
1963 __ leal(h, Address(i, 1)); // i+1 | |
1964 __ cmpl(h, j); // i+1 < j | |
1965 __ jcc(Assembler::less, loop); | |
1966 } | |
1967 | |
1968 // end of binary search, result index is i (must check again!) | |
1969 Label default_case; | |
1970 // Convert array[i].match to native byte-ordering before compare | |
1971 __ movl(temp, Address(array, i, Address::times_8)); | |
1972 __ bswapl(temp); | |
1973 __ cmpl(key, temp); | |
1974 __ jcc(Assembler::notEqual, default_case); | |
1975 | |
1976 // entry found -> j = offset | |
1977 __ movl(j , Address(array, i, Address::times_8, BytesPerInt)); | |
1978 __ profile_switch_case(i, key, array); | |
1979 __ bswapl(j); | |
1980 __ movslq(j, j); | |
1981 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1)); | |
1982 __ addq(r13, j); | |
1983 __ dispatch_only(vtos); | |
1984 | |
1985 // default case -> j = default offset | |
1986 __ bind(default_case); | |
1987 __ profile_switch_default(i); | |
1988 __ movl(j, Address(array, -2 * BytesPerInt)); | |
1989 __ bswapl(j); | |
1990 __ movslq(j, j); | |
1991 __ load_unsigned_byte(rbx, Address(r13, j, Address::times_1)); | |
1992 __ addq(r13, j); | |
1993 __ dispatch_only(vtos); | |
1994 } | |
1995 | |
1996 | |
1997 void TemplateTable::_return(TosState state) { | |
1998 transition(state, state); | |
1999 assert(_desc->calls_vm(), | |
2000 "inconsistent calls_vm information"); // call in remove_activation | |
2001 | |
2002 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) { | |
2003 assert(state == vtos, "only valid state"); | |
2004 __ movq(c_rarg1, aaddress(0)); | |
2005 __ load_klass(rdi, c_rarg1); | |
2006 __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); | |
2007 __ testl(rdi, JVM_ACC_HAS_FINALIZER); | |
2008 Label skip_register_finalizer; | |
2009 __ jcc(Assembler::zero, skip_register_finalizer); | |
2010 | |
2011 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), c_rarg1); | |
2012 | |
2013 __ bind(skip_register_finalizer); | |
2014 } | |
2015 | |
2016 __ remove_activation(state, r13); | |
2017 __ jmp(r13); | |
2018 } | |
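// The finalizer path above amounts to the following pseudo-Java sketch,
// run when Object.<init> returns:
//
//   if ((receiver.getClass().access_flags & JVM_ACC_HAS_FINALIZER) != 0)
//     InterpreterRuntime::register_finalizer(receiver);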
2019 | |
2020 // ---------------------------------------------------------------------------- | |
2021 // Volatile variables demand their effects be made known to all CPUs | |
2022 // in order. Store buffers on most chips allow reads & writes to | |
2023 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode | |
2024 // without some kind of memory barrier (i.e., it's not sufficient that | |
2025 // the interpreter does not reorder volatile references, the hardware | |
2026 // also must not reorder them). | |
2027 // | |
2028 // According to the new Java Memory Model (JMM): | |
2029 // (1) All volatiles are serialized wrt to each other. ALSO reads & | |
2030 // writes act as acquire & release, so: | |
2031 // (2) A read cannot let unrelated NON-volatile memory refs that | |
2032 // happen after the read float up to before the read. It's OK for | |
2033 // non-volatile memory refs that happen before the volatile read to | |
2034 // float down below it. | |
2035 // (3) Similarly, a volatile write cannot let unrelated NON-volatile | |
2036 // memory refs that happen BEFORE the write float down to after the | |
2037 // write. It's OK for non-volatile memory refs that happen after the | |
2038 // volatile write to float up before it. | |
2039 // | |
2040 // We only put in barriers around volatile refs (they are expensive), | |
2041 // not _between_ memory refs (that would require us to track the | |
2042 // flavor of the previous memory refs). Requirements (2) and (3) | |
2043 // require some barriers before volatile stores and after volatile | |
2044 // loads. These nearly cover requirement (1) but miss the | |
2045 // volatile-store-volatile-load case. This final case is placed after | |
2046 // volatile-stores although it could just as well go before | |
2047 // volatile-loads. | |
2048 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits | |
2049 order_constraint) { | |
2050 // Helper function to insert an is-volatile test and memory barrier | |
2051 if (os::is_MP()) { // Not needed on single CPU | |
2052 __ membar(order_constraint); | |
2053 } | |
2054 } | |
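// On amd64's TSO memory model only the StoreLoad ordering needs a real
// fence instruction; the LoadLoad, LoadStore and StoreStore constraints
// come for free, which is why the load-side barriers stay commented out
// further below. A sketch of the placement the rules above imply:
//
//   volatile load : load; (LoadLoad|LoadStore)              -- no-ops here
//   volatile store: (LoadStore|StoreStore); store; (StoreLoad fence)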
2055 | |
2056 void TemplateTable::resolve_cache_and_index(int byte_no, | |
2057 Register Rcache, | |
2058 Register index) { | |
2059 assert(byte_no == 1 || byte_no == 2, "byte_no out of range"); | |
2060 | |
2061 const Register temp = rbx; | |
2062 assert_different_registers(Rcache, index, temp); | |
2063 | |
2064 const int shift_count = (1 + byte_no) * BitsPerByte; | |
2065 Label resolved; | |
2066 __ get_cache_and_index_at_bcp(Rcache, index, 1); | |
2067 __ movl(temp, Address(Rcache, | |
2068 index, Address::times_8, | |
2069 constantPoolCacheOopDesc::base_offset() + | |
2070 ConstantPoolCacheEntry::indices_offset())); | |
2071 __ shrl(temp, shift_count); | |
2072 // have we resolved this bytecode? | |
2073 __ andl(temp, 0xFF); | |
2074 __ cmpl(temp, (int) bytecode()); | |
2075 __ jcc(Assembler::equal, resolved); | |
2076 | |
2077 // resolve first time through | |
2078 address entry; | |
2079 switch (bytecode()) { | |
2080 case Bytecodes::_getstatic: | |
2081 case Bytecodes::_putstatic: | |
2082 case Bytecodes::_getfield: | |
2083 case Bytecodes::_putfield: | |
2084 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); | |
2085 break; | |
2086 case Bytecodes::_invokevirtual: | |
2087 case Bytecodes::_invokespecial: | |
2088 case Bytecodes::_invokestatic: | |
2089 case Bytecodes::_invokeinterface: | |
2090 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); | |
2091 break; | |
2092 default: | |
2093 ShouldNotReachHere(); | |
2094 break; | |
2095 } | |
2096 __ movl(temp, (int) bytecode()); | |
2097 __ call_VM(noreg, entry, temp); | |
2098 | |
2099 // Update registers with resolved info | |
2100 __ get_cache_and_index_at_bcp(Rcache, index, 1); | |
2101 __ bind(resolved); | |
2102 } | |
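// The indices word tested above packs the original constant-pool index
// together with the bytecodes recorded at resolution, roughly (a sketch of
// the encoding implied by the shift and mask; byte_no is 1 or 2):
//
//   indices  = (bytecode_2 << 24) | (bytecode_1 << 16) | cp_index;
//   resolved = ((indices >> ((1 + byte_no) * BitsPerByte)) & 0xFF) == bytecode();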
2103 | |
2104 // The Rcache and index registers must be set before the call. | |
2105 void TemplateTable::load_field_cp_cache_entry(Register obj, | |
2106 Register cache, | |
2107 Register index, | |
2108 Register off, | |
2109 Register flags, | |
2110 bool is_static = false) { | |
2111 assert_different_registers(cache, index, flags, off); | |
2112 | |
2113 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); | |
2114 // Field offset | |
2115 __ movq(off, Address(cache, index, Address::times_8, | |
2116 in_bytes(cp_base_offset + | |
2117 ConstantPoolCacheEntry::f2_offset()))); | |
2118 // Flags | |
2119 __ movl(flags, Address(cache, index, Address::times_8, | |
2120 in_bytes(cp_base_offset + | |
2121 ConstantPoolCacheEntry::flags_offset()))); | |
2122 | |
2123 // klass overwrite register | |
2124 if (is_static) { | |
2125 __ movq(obj, Address(cache, index, Address::times_8, | |
2126 in_bytes(cp_base_offset + | |
2127 ConstantPoolCacheEntry::f1_offset()))); | |
2128 } | |
2129 } | |
2130 | |
2131 void TemplateTable::load_invoke_cp_cache_entry(int byte_no, | |
2132 Register method, | |
2133 Register itable_index, | |
2134 Register flags, | |
2135 bool is_invokevirtual, | |
2136 bool is_invokevfinal /*unused*/) { | |
2137 // setup registers | |
2138 const Register cache = rcx; | |
2139 const Register index = rdx; | |
2140 assert_different_registers(method, flags); | |
2141 assert_different_registers(method, cache, index); | |
2142 assert_different_registers(itable_index, flags); | |
2143 assert_different_registers(itable_index, cache, index); | |
2144 // determine constant pool cache field offsets | |
2145 const int method_offset = in_bytes( | |
2146 constantPoolCacheOopDesc::base_offset() + | |
2147 (is_invokevirtual | |
2148 ? ConstantPoolCacheEntry::f2_offset() | |
2149 : ConstantPoolCacheEntry::f1_offset())); | |
2150 const int flags_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2151 ConstantPoolCacheEntry::flags_offset()); | |
2152 // access constant pool cache fields | |
2153 const int index_offset = in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2154 ConstantPoolCacheEntry::f2_offset()); | |
2155 | |
2156 resolve_cache_and_index(byte_no, cache, index); | |
2157 | |
2158 assert(wordSize == 8, "adjust code below"); | |
2159 __ movq(method, Address(cache, index, Address::times_8, method_offset)); | |
2160 if (itable_index != noreg) { | |
2161 __ movq(itable_index, | |
2162 Address(cache, index, Address::times_8, index_offset)); | |
2163 } | |
2164 __ movl(flags , Address(cache, index, Address::times_8, flags_offset)); | |
2165 } | |
2166 | |
2167 | |
2168 // The cache and index registers are expected to be set before the call. | |
2169 // Correct values of the cache and index registers are preserved. | |
2170 void TemplateTable::jvmti_post_field_access(Register cache, Register index, | |
2171 bool is_static, bool has_tos) { | |
2172 // do the JVMTI work here to avoid disturbing the register state below | |
2173 // We use c_rarg registers here because we want to use the register used in | |
2174 // the call to the VM | |
2175 if (JvmtiExport::can_post_field_access()) { | |
2176 // Check to see if a field access watch has been set before we | |
2177 // take the time to call into the VM. | |
2178 Label L1; | |
2179 assert_different_registers(cache, index, rax); | |
2180 __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); | |
2181 __ testl(rax, rax); | |
2182 __ jcc(Assembler::zero, L1); | |
2183 | |
2184 __ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1); | |
2185 | |
2186 // cache entry pointer | |
2187 __ addq(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset())); | |
2188 __ shll(c_rarg3, LogBytesPerWord); | |
2189 __ addq(c_rarg2, c_rarg3); | |
2190 if (is_static) { | |
2191 __ xorl(c_rarg1, c_rarg1); // NULL object reference | |
2192 } else { | |
2193 __ movq(c_rarg1, at_tos()); // get object pointer without popping it | |
2194 __ verify_oop(c_rarg1); | |
2195 } | |
2196 // c_rarg1: object pointer or NULL | |
2197 // c_rarg2: cache entry pointer | |
2198 // c_rarg3: jvalue object on the stack | |
2199 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
2200 InterpreterRuntime::post_field_access), | |
2201 c_rarg1, c_rarg2, c_rarg3); | |
2202 __ get_cache_and_index_at_bcp(cache, index, 1); | |
2203 __ bind(L1); | |
2204 } | |
2205 } | |
2206 | |
2207 void TemplateTable::pop_and_check_object(Register r) { | |
2208 __ pop_ptr(r); | |
2209 __ null_check(r); // for field access must check obj. | |
2210 __ verify_oop(r); | |
2211 } | |
2212 | |
2213 void TemplateTable::getfield_or_static(int byte_no, bool is_static) { | |
2214 transition(vtos, vtos); | |
2215 | |
2216 const Register cache = rcx; | |
2217 const Register index = rdx; | |
2218 const Register obj = c_rarg3; | |
2219 const Register off = rbx; | |
2220 const Register flags = rax; | |
2221 const Register bc = c_rarg3; // uses same reg as obj, so don't mix them | |
2222 | |
2223 resolve_cache_and_index(byte_no, cache, index); | |
2224 jvmti_post_field_access(cache, index, is_static, false); | |
2225 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); | |
2226 | |
2227 if (!is_static) { | |
2228 // obj is on the stack | |
2229 pop_and_check_object(obj); | |
2230 } | |
2231 | |
2232 const Address field(obj, off, Address::times_1); | |
2233 | |
2234 Label Done, notByte, notInt, notShort, notChar, | |
2235 notLong, notFloat, notObj, notDouble; | |
2236 | |
2237 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2238 assert(btos == 0, "change code, btos != 0"); | |
2239 | |
2240 __ andl(flags, 0x0F); | |
2241 __ jcc(Assembler::notZero, notByte); | |
2242 // btos | |
2243 __ load_signed_byte(rax, field); | |
2244 __ push(btos); | |
2245 // Rewrite bytecode to be faster | |
2246 if (!is_static) { | |
2247 patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx); | |
2248 } | |
2249 __ jmp(Done); | |
2250 | |
2251 __ bind(notByte); | |
2252 __ cmpl(flags, atos); | |
2253 __ jcc(Assembler::notEqual, notObj); | |
2254 // atos | |
2255 __ load_heap_oop(rax, field); | |
2256 __ push(atos); | |
2257 if (!is_static) { | |
2258 patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx); | |
2259 } | |
2260 __ jmp(Done); | |
2261 | |
2262 __ bind(notObj); | |
2263 __ cmpl(flags, itos); | |
2264 __ jcc(Assembler::notEqual, notInt); | |
2265 // itos | |
2266 __ movl(rax, field); | |
2267 __ push(itos); | |
2268 // Rewrite bytecode to be faster | |
2269 if (!is_static) { | |
2270 patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx); | |
2271 } | |
2272 __ jmp(Done); | |
2273 | |
2274 __ bind(notInt); | |
2275 __ cmpl(flags, ctos); | |
2276 __ jcc(Assembler::notEqual, notChar); | |
2277 // ctos | |
2278 __ load_unsigned_word(rax, field); | |
2279 __ push(ctos); | |
2280 // Rewrite bytecode to be faster | |
2281 if (!is_static) { | |
2282 patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx); | |
2283 } | |
2284 __ jmp(Done); | |
2285 | |
2286 __ bind(notChar); | |
2287 __ cmpl(flags, stos); | |
2288 __ jcc(Assembler::notEqual, notShort); | |
2289 // stos | |
2290 __ load_signed_word(rax, field); | |
2291 __ push(stos); | |
2292 // Rewrite bytecode to be faster | |
2293 if (!is_static) { | |
2294 patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx); | |
2295 } | |
2296 __ jmp(Done); | |
2297 | |
2298 __ bind(notShort); | |
2299 __ cmpl(flags, ltos); | |
2300 __ jcc(Assembler::notEqual, notLong); | |
2301 // ltos | |
2302 __ movq(rax, field); | |
2303 __ push(ltos); | |
2304 // Rewrite bytecode to be faster | |
2305 if (!is_static) { | |
2306 patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx); | |
2307 } | |
2308 __ jmp(Done); | |
2309 | |
2310 __ bind(notLong); | |
2311 __ cmpl(flags, ftos); | |
2312 __ jcc(Assembler::notEqual, notFloat); | |
2313 // ftos | |
2314 __ movflt(xmm0, field); | |
2315 __ push(ftos); | |
2316 // Rewrite bytecode to be faster | |
2317 if (!is_static) { | |
2318 patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx); | |
2319 } | |
2320 __ jmp(Done); | |
2321 | |
2322 __ bind(notFloat); | |
2323 #ifdef ASSERT | |
2324 __ cmpl(flags, dtos); | |
2325 __ jcc(Assembler::notEqual, notDouble); | |
2326 #endif | |
2327 // dtos | |
2328 __ movdbl(xmm0, field); | |
2329 __ push(dtos); | |
2330 // Rewrite bytecode to be faster | |
2331 if (!is_static) { | |
2332 patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx); | |
2333 } | |
2334 #ifdef ASSERT | |
2335 __ jmp(Done); | |
2336 | |
2337 __ bind(notDouble); | |
2338 __ stop("Bad state"); | |
2339 #endif | |
2340 | |
2341 __ bind(Done); | |
2342 // [jk] not needed currently | |
2343 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad | | |
2344 // Assembler::LoadStore)); | |
2345 } | |
2346 | |
2347 | |
2348 void TemplateTable::getfield(int byte_no) { | |
2349 getfield_or_static(byte_no, false); | |
2350 } | |
2351 | |
2352 void TemplateTable::getstatic(int byte_no) { | |
2353 getfield_or_static(byte_no, true); | |
2354 } | |
2355 | |
2356 // The cache and index registers are expected to be set before the call. | |
2357 // The function may destroy various registers, just not the cache and index registers. | |
2358 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) { | |
2359 transition(vtos, vtos); | |
2360 | |
2361 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); | |
2362 | |
2363 if (JvmtiExport::can_post_field_modification()) { | |
2364 // Check to see if a field modification watch has been set before | |
2365 // we take the time to call into the VM. | |
2366 Label L1; | |
2367 assert_different_registers(cache, index, rax); | |
2368 __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); | |
2369 __ testl(rax, rax); | |
2370 __ jcc(Assembler::zero, L1); | |
2371 | |
2372 __ get_cache_and_index_at_bcp(c_rarg2, rscratch1, 1); | |
2373 | |
2374 if (is_static) { | |
2375 // Life is simple. Null out the object pointer. | |
2376 __ xorl(c_rarg1, c_rarg1); | |
2377 } else { | |
2378 // Life is harder. The stack holds the value on top, followed by | |
2379 // the object. We don't know the size of the value, though; it | |
2380 // could be one or two words depending on its type. As a result, | |
2381 // we must find the type to determine where the object is. | |
2382 __ movl(c_rarg3, Address(c_rarg2, rscratch1, | |
2383 Address::times_8, | |
2384 in_bytes(cp_base_offset + | |
2385 ConstantPoolCacheEntry::flags_offset()))); | |
2386 __ shrl(c_rarg3, ConstantPoolCacheEntry::tosBits); | |
2387 // Make sure we don't need to mask rcx for tosBits after the | |
2388 // above shift | |
2389 ConstantPoolCacheEntry::verify_tosBits(); | |
2390 __ movq(c_rarg1, at_tos_p1()); // initially assume a one word jvalue | |
2391 __ cmpl(c_rarg3, ltos); | |
2392 __ cmovq(Assembler::equal, | |
2393 c_rarg1, at_tos_p2()); // ltos (two word jvalue) | |
2394 __ cmpl(c_rarg3, dtos); | |
2395 __ cmovq(Assembler::equal, | |
2396 c_rarg1, at_tos_p2()); // dtos (two word jvalue) | |
2397 } | |
2398 // cache entry pointer | |
2399 __ addq(c_rarg2, in_bytes(cp_base_offset)); | |
2400 __ shll(rscratch1, LogBytesPerWord); | |
2401 __ addq(c_rarg2, rscratch1); | |
2402 // object (tos) | |
2403 __ movq(c_rarg3, rsp); | |
2404 // c_rarg1: object pointer set up above (NULL if static) | |
2405 // c_rarg2: cache entry pointer | |
2406 // c_rarg3: jvalue object on the stack | |
2407 __ call_VM(noreg, | |
2408 CAST_FROM_FN_PTR(address, | |
2409 InterpreterRuntime::post_field_modification), | |
2410 c_rarg1, c_rarg2, c_rarg3); | |
2411 __ get_cache_and_index_at_bcp(cache, index, 1); | |
2412 __ bind(L1); | |
2413 } | |
2414 } | |
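// Expression-stack layout distinguished above when locating the object of
// a non-static put (a sketch; the stack grows toward lower addresses):
//
//   one-word value             two-word value (ltos/dtos)
//   [value ] <- tos            [value ] <- tos, tos+1
//   [objref] <- tos+1          [objref] <- tos+2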
2415 | |
2416 void TemplateTable::putfield_or_static(int byte_no, bool is_static) { | |
2417 transition(vtos, vtos); | |
2418 | |
2419 const Register cache = rcx; | |
2420 const Register index = rdx; | |
2421 const Register obj = rcx; | |
2422 const Register off = rbx; | |
2423 const Register flags = rax; | |
2424 const Register bc = c_rarg3; | |
2425 | |
2426 resolve_cache_and_index(byte_no, cache, index); | |
2427 jvmti_post_field_mod(cache, index, is_static); | |
2428 load_field_cp_cache_entry(obj, cache, index, off, flags, is_static); | |
2429 | |
2430 // [jk] not needed currently | |
2431 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | | |
2432 // Assembler::StoreStore)); | |
2433 | |
2434 Label notVolatile, Done; | |
2435 __ movl(rdx, flags); | |
2436 __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2437 __ andl(rdx, 0x1); | |
2438 | |
2439 // field address | |
2440 const Address field(obj, off, Address::times_1); | |
2441 | |
2442 Label notByte, notInt, notShort, notChar, | |
2443 notLong, notFloat, notObj, notDouble; | |
2444 | |
2445 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2446 | |
2447 assert(btos == 0, "change code, btos != 0"); | |
2448 __ andl(flags, 0x0f); | |
2449 __ jcc(Assembler::notZero, notByte); | |
2450 // btos | |
2451 __ pop(btos); | |
2452 if (!is_static) pop_and_check_object(obj); | |
2453 __ movb(field, rax); | |
2454 if (!is_static) { | |
2455 patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx); | |
2456 } | |
2457 __ jmp(Done); | |
2458 | |
2459 __ bind(notByte); | |
2460 __ cmpl(flags, atos); | |
2461 __ jcc(Assembler::notEqual, notObj); | |
2462 // atos | |
2463 __ pop(atos); | |
2464 if (!is_static) pop_and_check_object(obj); | |
2465 | |
2466 // Store into the field | |
2467 do_oop_store(_masm, field, rax, _bs->kind(), false); | |
2468 | |
2469 if (!is_static) { | |
2470 patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx); | |
2471 } | |
2472 __ jmp(Done); | |
2473 | |
2474 __ bind(notObj); | |
2475 __ cmpl(flags, itos); | |
2476 __ jcc(Assembler::notEqual, notInt); | |
2477 // itos | |
2478 __ pop(itos); | |
2479 if (!is_static) pop_and_check_object(obj); | |
2480 __ movl(field, rax); | |
2481 if (!is_static) { | |
2482 patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx); | |
2483 } | |
2484 __ jmp(Done); | |
2485 | |
2486 __ bind(notInt); | |
2487 __ cmpl(flags, ctos); | |
2488 __ jcc(Assembler::notEqual, notChar); | |
2489 // ctos | |
2490 __ pop(ctos); | |
2491 if (!is_static) pop_and_check_object(obj); | |
2492 __ movw(field, rax); | |
2493 if (!is_static) { | |
2494 patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx); | |
2495 } | |
2496 __ jmp(Done); | |
2497 | |
2498 __ bind(notChar); | |
2499 __ cmpl(flags, stos); | |
2500 __ jcc(Assembler::notEqual, notShort); | |
2501 // stos | |
2502 __ pop(stos); | |
2503 if (!is_static) pop_and_check_object(obj); | |
2504 __ movw(field, rax); | |
2505 if (!is_static) { | |
2506 patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx); | |
2507 } | |
2508 __ jmp(Done); | |
2509 | |
2510 __ bind(notShort); | |
2511 __ cmpl(flags, ltos); | |
2512 __ jcc(Assembler::notEqual, notLong); | |
2513 // ltos | |
2514 __ pop(ltos); | |
2515 if (!is_static) pop_and_check_object(obj); | |
2516 __ movq(field, rax); | |
2517 if (!is_static) { | |
2518 patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx); | |
2519 } | |
2520 __ jmp(Done); | |
2521 | |
2522 __ bind(notLong); | |
2523 __ cmpl(flags, ftos); | |
2524 __ jcc(Assembler::notEqual, notFloat); | |
2525 // ftos | |
2526 __ pop(ftos); | |
2527 if (!is_static) pop_and_check_object(obj); | |
2528 __ movflt(field, xmm0); | |
2529 if (!is_static) { | |
2530 patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx); | |
2531 } | |
2532 __ jmp(Done); | |
2533 | |
2534 __ bind(notFloat); | |
2535 #ifdef ASSERT | |
2536 __ cmpl(flags, dtos); | |
2537 __ jcc(Assembler::notEqual, notDouble); | |
2538 #endif | |
2539 // dtos | |
2540 __ pop(dtos); | |
2541 if (!is_static) pop_and_check_object(obj); | |
2542 __ movdbl(field, xmm0); | |
2543 if (!is_static) { | |
2544 patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx); | |
2545 } | |
2546 | |
2547 #ifdef ASSERT | |
2548 __ jmp(Done); | |
2549 | |
2550 __ bind(notDouble); | |
2551 __ stop("Bad state"); | |
2552 #endif | |
2553 | |
2554 __ bind(Done); | |
2555 // Check for volatile store | |
2556 __ testl(rdx, rdx); | |
2557 __ jcc(Assembler::zero, notVolatile); | |
2558 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | | |
2559 Assembler::StoreStore)); | |
2560 | |
2561 __ bind(notVolatile); | |
2562 } | |
2563 | |
2564 void TemplateTable::putfield(int byte_no) { | |
2565 putfield_or_static(byte_no, false); | |
2566 } | |
2567 | |
2568 void TemplateTable::putstatic(int byte_no) { | |
2569 putfield_or_static(byte_no, true); | |
2570 } | |
2571 | |
2572 void TemplateTable::jvmti_post_fast_field_mod() { | |
2573 if (JvmtiExport::can_post_field_modification()) { | |
2574 // Check to see if a field modification watch has been set before | |
2575 // we take the time to call into the VM. | |
2576 Label L2; | |
2577 __ mov32(c_rarg3, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr())); | |
2578 __ testl(c_rarg3, c_rarg3); | |
2579 __ jcc(Assembler::zero, L2); | |
2580 __ pop_ptr(rbx); // copy the object pointer from tos | |
2581 __ verify_oop(rbx); | |
2582 __ push_ptr(rbx); // put the object pointer back on tos | |
2583 __ subq(rsp, sizeof(jvalue)); // add space for a jvalue object | |
2584 __ movq(c_rarg3, rsp); | |
2585 const Address field(c_rarg3, 0); | |
2586 | |
2587 switch (bytecode()) { // load values into the jvalue object | |
2588 case Bytecodes::_fast_aputfield: __ movq(field, rax); break; | |
2589 case Bytecodes::_fast_lputfield: __ movq(field, rax); break; | |
2590 case Bytecodes::_fast_iputfield: __ movl(field, rax); break; | |
2591 case Bytecodes::_fast_bputfield: __ movb(field, rax); break; | |
2592 case Bytecodes::_fast_sputfield: // fall through | |
2593 case Bytecodes::_fast_cputfield: __ movw(field, rax); break; | |
2594 case Bytecodes::_fast_fputfield: __ movflt(field, xmm0); break; | |
2595 case Bytecodes::_fast_dputfield: __ movdbl(field, xmm0); break; | |
2596 default: | |
2597 ShouldNotReachHere(); | |
2598 } | |
2599 | |
2600 // Save rax because call_VM() will clobber it, then use it for | |
2601 // JVMTI purposes | |
2602 __ pushq(rax); | |
2603 // access constant pool cache entry | |
2604 __ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1); | |
2605 __ verify_oop(rbx); | |
2606 // rbx: object pointer copied above | |
2607 // c_rarg2: cache entry pointer | |
2608 // c_rarg3: jvalue object on the stack | |
2609 __ call_VM(noreg, | |
2610 CAST_FROM_FN_PTR(address, | |
2611 InterpreterRuntime::post_field_modification), | |
2612 rbx, c_rarg2, c_rarg3); | |
2613 __ popq(rax); // restore lower value | |
2614 __ addq(rsp, sizeof(jvalue)); // release jvalue object space | |
2615 __ bind(L2); | |
2616 } | |
2617 } | |
2618 | |
2619 void TemplateTable::fast_storefield(TosState state) { | |
2620 transition(state, vtos); | |
2621 | |
2622 ByteSize base = constantPoolCacheOopDesc::base_offset(); | |
2623 | |
2624 jvmti_post_fast_field_mod(); | |
2625 | |
2626 // access constant pool cache | |
2627 __ get_cache_and_index_at_bcp(rcx, rbx, 1); | |
2628 | |
2629 // test for volatile with rdx | |
2630 __ movl(rdx, Address(rcx, rbx, Address::times_8, | |
2631 in_bytes(base + | |
2632 ConstantPoolCacheEntry::flags_offset()))); | |
2633 | |
2634 // replace index with field offset from cache entry | |
2635 __ movq(rbx, Address(rcx, rbx, Address::times_8, | |
2636 in_bytes(base + ConstantPoolCacheEntry::f2_offset()))); | |
2637 | |
2638 // [jk] not needed currently | |
2639 // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore | | |
2640 // Assembler::StoreStore)); | |
2641 | |
2642 Label notVolatile; | |
2643 __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2644 __ andl(rdx, 0x1); | |
2645 | |
2646 // Get object from stack | |
2647 pop_and_check_object(rcx); | |
2648 | |
2649 // field address | |
2650 const Address field(rcx, rbx, Address::times_1); | |
2651 | |
2652 // access field | |
2653 switch (bytecode()) { | |
2654 case Bytecodes::_fast_aputfield: | |
2655 do_oop_store(_masm, field, rax, _bs->kind(), false); | |
2656 break; | |
2657 case Bytecodes::_fast_lputfield: | |
2658 __ movq(field, rax); | |
2659 break; | |
2660 case Bytecodes::_fast_iputfield: | |
2661 __ movl(field, rax); | |
2662 break; | |
2663 case Bytecodes::_fast_bputfield: | |
2664 __ movb(field, rax); | |
2665 break; | |
2666 case Bytecodes::_fast_sputfield: | |
2667 // fall through | |
2668 case Bytecodes::_fast_cputfield: | |
2669 __ movw(field, rax); | |
2670 break; | |
2671 case Bytecodes::_fast_fputfield: | |
2672 __ movflt(field, xmm0); | |
2673 break; | |
2674 case Bytecodes::_fast_dputfield: | |
2675 __ movdbl(field, xmm0); | |
2676 break; | |
2677 default: | |
2678 ShouldNotReachHere(); | |
2679 } | |
2680 | |
2681 // Check for volatile store | |
2682 __ testl(rdx, rdx); | |
2683 __ jcc(Assembler::zero, notVolatile); | |
2684 volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad | | |
2685 Assembler::StoreStore)); | |
2686 __ bind(notVolatile); | |
2687 } | |
2688 | |
2689 | |
2690 void TemplateTable::fast_accessfield(TosState state) { | |
2691 transition(atos, state); | |
2692 | |
2693 // Do the JVMTI work here to avoid disturbing the register state below | |
2694 if (JvmtiExport::can_post_field_access()) { | |
2695 // Check to see if a field access watch has been set before we | |
2696 // take the time to call into the VM. | |
2697 Label L1; | |
2698 __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr())); | |
2699 __ testl(rcx, rcx); | |
2700 __ jcc(Assembler::zero, L1); | |
2701 // access constant pool cache entry | |
2702 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1); | |
2703 __ verify_oop(rax); | |
2704 __ movq(r12, rax); // save object pointer before call_VM() clobbers it | |
2705 __ movq(c_rarg1, rax); | |
2706 // c_rarg1: object pointer copied above | |
2707 // c_rarg2: cache entry pointer | |
2708 __ call_VM(noreg, | |
2709 CAST_FROM_FN_PTR(address, | |
2710 InterpreterRuntime::post_field_access), | |
2711 c_rarg1, c_rarg2); | |
2712 __ movq(rax, r12); // restore object pointer | |
2713 __ reinit_heapbase(); | |
2714 __ bind(L1); | |
2715 } | |
2716 | |
2717 // access constant pool cache | |
2718 __ get_cache_and_index_at_bcp(rcx, rbx, 1); | |
2719 // replace index with field offset from cache entry | |
2720 // [jk] not needed currently | |
2721 // if (os::is_MP()) { | |
2722 // __ movl(rdx, Address(rcx, rbx, Address::times_8, | |
2723 // in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2724 // ConstantPoolCacheEntry::flags_offset()))); | |
2725 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2726 // __ andl(rdx, 0x1); | |
2727 // } | |
2728 __ movq(rbx, Address(rcx, rbx, Address::times_8, | |
2729 in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2730 ConstantPoolCacheEntry::f2_offset()))); | |
2731 | |
2732 // rax: object | |
2733 __ verify_oop(rax); | |
2734 __ null_check(rax); | |
2735 Address field(rax, rbx, Address::times_1); | |
2736 | |
2737 // access field | |
2738 switch (bytecode()) { | |
2739 case Bytecodes::_fast_agetfield: | |
2740 __ load_heap_oop(rax, field); | |
2741 __ verify_oop(rax); | |
2742 break; | |
2743 case Bytecodes::_fast_lgetfield: | |
2744 __ movq(rax, field); | |
2745 break; | |
2746 case Bytecodes::_fast_igetfield: | |
2747 __ movl(rax, field); | |
2748 break; | |
2749 case Bytecodes::_fast_bgetfield: | |
2750 __ movsbl(rax, field); | |
2751 break; | |
2752 case Bytecodes::_fast_sgetfield: | |
2753 __ load_signed_word(rax, field); | |
2754 break; | |
2755 case Bytecodes::_fast_cgetfield: | |
2756 __ load_unsigned_word(rax, field); | |
2757 break; | |
2758 case Bytecodes::_fast_fgetfield: | |
2759 __ movflt(xmm0, field); | |
2760 break; | |
2761 case Bytecodes::_fast_dgetfield: | |
2762 __ movdbl(xmm0, field); | |
2763 break; | |
2764 default: | |
2765 ShouldNotReachHere(); | |
2766 } | |
2767 // [jk] not needed currently | |
2768 // if (os::is_MP()) { | |
2769 // Label notVolatile; | |
2770 // __ testl(rdx, rdx); | |
2771 // __ jcc(Assembler::zero, notVolatile); | |
2772 // __ membar(Assembler::LoadLoad); | |
2773 // __ bind(notVolatile); | |
2774 //}; | |
2775 } | |
2776 | |
2777 void TemplateTable::fast_xaccess(TosState state) { | |
2778 transition(vtos, state); | |
2779 | |
2780 // get receiver | |
2781 __ movq(rax, aaddress(0)); | |
2782 debug_only(__ verify_local_tag(frame::TagReference, 0)); | |
2783 // access constant pool cache | |
2784 __ get_cache_and_index_at_bcp(rcx, rdx, 2); | |
2785 __ movq(rbx, | |
2786 Address(rcx, rdx, Address::times_8, | |
2787 in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2788 ConstantPoolCacheEntry::f2_offset()))); | |
2789 // make sure exception is reported in correct bcp range (getfield is | |
2790 // next instruction) | |
2791 __ incrementq(r13); | |
2792 __ null_check(rax); | |
2793 switch (state) { | |
2794 case itos: | |
2795 __ movl(rax, Address(rax, rbx, Address::times_1)); | |
2796 break; | |
2797 case atos: | |
2798 __ load_heap_oop(rax, Address(rax, rbx, Address::times_1)); | |
2799 __ verify_oop(rax); | |
2800 break; | |
2801 case ftos: | |
2802 __ movflt(xmm0, Address(rax, rbx, Address::times_1)); | |
2803 break; | |
2804 default: | |
2805 ShouldNotReachHere(); | |
2806 } | |
2807 | |
2808 // [jk] not needed currently | |
2809 // if (os::is_MP()) { | |
2810 // Label notVolatile; | |
2811 // __ movl(rdx, Address(rcx, rdx, Address::times_8, | |
2812 // in_bytes(constantPoolCacheOopDesc::base_offset() + | |
2813 // ConstantPoolCacheEntry::flags_offset()))); | |
2814 // __ shrl(rdx, ConstantPoolCacheEntry::volatileField); | |
2815 // __ testl(rdx, 0x1); | |
2816 // __ jcc(Assembler::zero, notVolatile); | |
2817 // __ membar(Assembler::LoadLoad); | |
2818 // __ bind(notVolatile); | |
2819 // } | |
2820 | |
2821 __ decrementq(r13); | |
2822 } | |
2823 | |
2824 | |
2825 | |
2826 //----------------------------------------------------------------------------- | |
2827 // Calls | |
2828 | |
2829 void TemplateTable::count_calls(Register method, Register temp) { | |
2830 // implemented elsewhere | |
2831 ShouldNotReachHere(); | |
2832 } | |
2833 | |
2834 void TemplateTable::prepare_invoke(Register method, | |
2835 Register index, | |
2836 int byte_no, | |
2837 Bytecodes::Code code) { | |
2838 // determine flags | |
2839 const bool is_invokeinterface = code == Bytecodes::_invokeinterface; | |
2840 const bool is_invokevirtual = code == Bytecodes::_invokevirtual; | |
2841 const bool is_invokespecial = code == Bytecodes::_invokespecial; | |
2842 const bool load_receiver = code != Bytecodes::_invokestatic; | |
2843 const bool receiver_null_check = is_invokespecial; | |
2844 const bool save_flags = is_invokeinterface || is_invokevirtual; | |
2845 // setup registers & access constant pool cache | |
2846 const Register recv = rcx; | |
2847 const Register flags = rdx; | |
2848 assert_different_registers(method, index, recv, flags); | |
2849 | |
2850 // save 'interpreter return address' | |
2851 __ save_bcp(); | |
2852 | |
2853 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual); | |
2854 | |
2855 // load receiver if needed (note: no return address pushed yet) | |
2856 if (load_receiver) { | |
2857 __ movl(recv, flags); | |
2858 __ andl(recv, 0xFF); | |
2859 if (TaggedStackInterpreter) __ shll(recv, 1); // index*2 | |
2860 __ movq(recv, Address(rsp, recv, Address::times_8, | |
2861 -Interpreter::expr_offset_in_bytes(1))); | |
2862 __ verify_oop(recv); | |
2863 } | |
2864 | |
2865 // do null check if needed | |
2866 if (receiver_null_check) { | |
2867 __ null_check(recv); | |
2868 } | |
2869 | |
2870 if (save_flags) { | |
2871 __ movl(r13, flags); | |
2872 } | |
2873 | |
2874 // compute return type | |
2875 __ shrl(flags, ConstantPoolCacheEntry::tosBits); | |
2876 // Make sure we don't need to mask flags for tosBits after the above shift | |
2877 ConstantPoolCacheEntry::verify_tosBits(); | |
2878 // load return address | |
2879 { | |
2880 ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table()); | |
2881 ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table()); | |
2882 __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3)); | |
2883 __ movq(flags, Address(rscratch1, flags, Address::times_8)); | |
2884 } | |
2885 | |
2886 // push return address | |
2887 __ pushq(flags); | |
2888 | |
2889 // Restore flag field from the constant pool cache, and restore esi | |
2890 // for later null checks. r13 is the bytecode pointer | |
2891 if (save_flags) { | |
2892 __ movl(flags, r13); | |
2893 __ restore_bcp(); | |
2894 } | |
2895 } | |
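// Fields of the cp-cache flags word consumed above (a sketch implied by
// the masks and shifts, not a definitive layout):
//
//   int      parameter_size = flags & 0xFF;  // locates the receiver slot
//   TosState result_type    = (TosState) (flags >> ConstantPoolCacheEntry::tosBits);
//                                             // indexes the return-address tables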
2896 | |
2897 | |
2898 void TemplateTable::invokevirtual_helper(Register index, | |
2899 Register recv, | |
2900 Register flags) { | |
2901 // Uses temporary registers rax, rdx | |
2902 assert_different_registers(index, recv, rax, rdx); | |
2903 // Test for an invoke of a final method | |
2904 Label notFinal; | |
2905 __ movl(rax, flags); | |
2906 __ andl(rax, (1 << ConstantPoolCacheEntry::vfinalMethod)); | |
2907 __ jcc(Assembler::zero, notFinal); | |
2908 | |
2909 const Register method = index; // method must be rbx | |
2910 assert(method == rbx, | |
2911 "methodOop must be rbx for interpreter calling convention"); | |
2912 | |
2913 // do the call - the index is actually the method to call | |
2914 __ verify_oop(method); | |
2915 | |
2916 // It's final, need a null check here! | |
2917 __ null_check(recv); | |
2918 | |
2919 // profile this call | |
2920 __ profile_final_call(rax); | |
2921 | |
2922 __ jump_from_interpreted(method, rax); | |
2923 | |
2924 __ bind(notFinal); | |
2925 | |
2926 // get receiver klass | |
2927 __ null_check(recv, oopDesc::klass_offset_in_bytes()); | |
2928 __ load_klass(rax, recv); | |
2929 | |
2930 __ verify_oop(rax); | |
2931 | |
2932 // profile this call | |
2933 __ profile_virtual_call(rax, r14, rdx); | |
2934 | |
2935 // get target methodOop & entry point | |
2936 const int base = instanceKlass::vtable_start_offset() * wordSize; | |
2937 assert(vtableEntry::size() * wordSize == 8, | |
2938 "adjust the scaling in the code below"); | |
2939 __ movq(method, Address(rax, index, | |
113 | 2940 Address::times_8, |
2941 base + vtableEntry::method_offset_in_bytes())); |
0 | 2942 __ movq(rdx, Address(method, methodOopDesc::interpreter_entry_offset())); |
2943 __ jump_from_interpreted(method, rdx); | |
2944 } | |
2945 | |
2946 | |
2947 void TemplateTable::invokevirtual(int byte_no) { | |
2948 transition(vtos, vtos); | |
2949 prepare_invoke(rbx, noreg, byte_no, bytecode()); | |
2950 | |
2951 // rbx: index | |
2952 // rcx: receiver | |
2953 // rdx: flags | |
2954 | |
2955 invokevirtual_helper(rbx, rcx, rdx); | |
2956 } | |
2957 | |
2958 | |
2959 void TemplateTable::invokespecial(int byte_no) { | |
2960 transition(vtos, vtos); | |
2961 prepare_invoke(rbx, noreg, byte_no, bytecode()); | |
2962 // do the call | |
2963 __ verify_oop(rbx); | |
2964 __ profile_call(rax); | |
2965 __ jump_from_interpreted(rbx, rax); | |
2966 } | |
2967 | |
2968 | |
2969 void TemplateTable::invokestatic(int byte_no) { | |
2970 transition(vtos, vtos); | |
2971 prepare_invoke(rbx, noreg, byte_no, bytecode()); | |
2972 // do the call | |
2973 __ verify_oop(rbx); | |
2974 __ profile_call(rax); | |
2975 __ jump_from_interpreted(rbx, rax); | |
2976 } | |
2977 | |
2978 void TemplateTable::fast_invokevfinal(int byte_no) { | |
2979 transition(vtos, vtos); | |
2980 __ stop("fast_invokevfinal not used on amd64"); | |
2981 } | |
2982 | |
2983 void TemplateTable::invokeinterface(int byte_no) { | |
2984 transition(vtos, vtos); | |
2985 prepare_invoke(rax, rbx, byte_no, bytecode()); | |
2986 | |
2987 // rax: Interface | |
2988 // rbx: index | |
2989 // rcx: receiver | |
2990 // rdx: flags | |
2991 | |
2992 // Special case of invokeinterface called for virtual method of | |
2993 // java.lang.Object. See cpCacheOop.cpp for details. | |
2994 // This code isn't produced by javac, but could be produced by | |
2995 // another compliant Java compiler. | |
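// Hypothetical example of such code: | |
// interface I {} class C implements I { ... } I i = new C(); | |
// i.hashCode(); // an Object method through an interface reference | |
// may legally be compiled to invokeinterface, and is then dispatched | |
// below as a plain virtual call. | |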
2996 Label notMethod; | |
2997 __ movl(r14, rdx); | |
2998 __ andl(r14, (1 << ConstantPoolCacheEntry::methodInterface)); | |
2999 __ jcc(Assembler::zero, notMethod); | |
3000 | |
3001 invokevirtual_helper(rbx, rcx, rdx); | |
3002 __ bind(notMethod); | |
3003 | |
3004 // Get receiver klass into rdx - also a null check | |
3005 __ restore_locals(); // restore r14 | |
113 | 3006 __ load_klass(rdx, rcx); |
0 | 3007 __ verify_oop(rdx); |
3008 | |
3009 // profile this call | |
3010 __ profile_virtual_call(rdx, r13, r14); | |
3011 | |
3012 __ movq(r14, rdx); // Save klassOop in r14 | |
3013 | |
3014 // Compute start of first itableOffsetEntry (which is at the end of | |
3015 // the vtable) | |
3016 const int base = instanceKlass::vtable_start_offset() * wordSize; | |
3017 // Get length of vtable | |
3018 assert(vtableEntry::size() * wordSize == 8, | |
3019 "adjust the scaling in the code below"); | |
3020 __ movl(r13, Address(rdx, | |
3021 instanceKlass::vtable_length_offset() * wordSize)); | |
3022 __ leaq(rdx, Address(rdx, r13, Address::times_8, base)); | |
3023 | |
3024 if (HeapWordsPerLong > 1) { | |
3025 // Round up to align_object_offset boundary | |
3026 __ round_to_q(rdx, BytesPerLong); | |
3027 } | |
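// Itable layout sketch (assumed, see klassVtable.hpp): | |
// [vtable entries ...] | |
// rdx -> [itableOffsetEntry: interface klass | offset] | |
// ... | |
// [null-terminated] | |
// where 'offset' locates that interface's itableMethodEntry array | |
// relative to the klassOop, which is why r14 is added back in below. | |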
3028 | |
3029 Label entry, search, interface_ok; | |
3030 | |
3031 __ jmpb(entry); | |
3032 __ bind(search); | |
3033 __ addq(rdx, itableOffsetEntry::size() * wordSize); | |
3034 | |
3035 __ bind(entry); | |
3036 | |
3037 // Check that the entry is non-null. A null entry means that the | |
3038 // receiver class doesn't implement the interface, and wasn't the | |
3039 // same as the receiver class checked when the interface was | |
3040 // resolved. | |
3041 __ pushq(rdx); | |
3042 __ movq(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); | |
3043 __ testq(rdx, rdx); | |
3044 __ jcc(Assembler::notZero, interface_ok); | |
3045 // throw exception | |
3046 __ popq(rdx); // pop saved register first. | |
3047 __ popq(rbx); // pop return address (pushed by prepare_invoke) | |
3048 __ restore_bcp(); // r13 must be correct for exception handler (was | |
3049 // destroyed) | |
3050 __ restore_locals(); // make sure locals pointer is correct as well | |
3051 // (was destroyed) | |
3052 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
3053 InterpreterRuntime::throw_IncompatibleClassChangeError)); | |
3054 // the call_VM checks for exception, so we should never return here. | |
3055 __ should_not_reach_here(); | |
3056 __ bind(interface_ok); | |
3057 | |
3058 __ popq(rdx); | |
3059 | |
3060 __ cmpq(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes())); | |
3061 __ jcc(Assembler::notEqual, search); | |
3062 | |
3063 __ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes())); | |
3064 | |
3065 __ addq(rdx, r14); // Add offset to klassOop | |
3066 assert(itableMethodEntry::size() * wordSize == 8, | |
3067 "adjust the scaling in the code below"); | |
3068 __ movq(rbx, Address(rdx, rbx, Address::times_8)); | |
3069 // rbx: methodOop to call | |
3070 // rcx: receiver | |
3071 // Check for abstract method error | |
3072 // Note: This should be done more efficiently via a | |
3073 // throw_abstract_method_error interpreter entry point and a | |
3074 // conditional jump to it in case of a null method. | |
3075 { | |
3076 Label L; | |
3077 __ testq(rbx, rbx); | |
3078 __ jcc(Assembler::notZero, L); | |
3079 // throw exception | |
3080 // note: must restore interpreter registers to canonical | |
3081 // state for exception handling to work correctly! | |
3082 __ popq(rbx); // pop return address (pushed by prepare_invoke) | |
3083 __ restore_bcp(); // r13 must be correct for exception handler | |
3084 // (was destroyed) | |
3085 __ restore_locals(); // make sure locals pointer is correct as | |
3086 // well (was destroyed) | |
3087 __ call_VM(noreg, | |
3088 CAST_FROM_FN_PTR(address, | |
3089 InterpreterRuntime::throw_AbstractMethodError)); | |
3090 // the call_VM checks for exception, so we should never return here. | |
3091 __ should_not_reach_here(); | |
3092 __ bind(L); | |
3093 } | |
3094 | |
3095 __ movq(rcx, Address(rbx, methodOopDesc::interpreter_entry_offset())); | |
3096 | |
3097 // do the call | |
3098 // rcx: receiver | |
3099 // rbx: methodOop | |
3100 __ jump_from_interpreted(rbx, rdx); | |
3101 } | |
3102 | |
3103 //----------------------------------------------------------------------------- | |
3104 // Allocation | |
3105 | |
3106 void TemplateTable::_new() { | |
3107 transition(vtos, atos); | |
3108 __ get_unsigned_2_byte_index_at_bcp(rdx, 1); | |
3109 Label slow_case; | |
3110 Label done; | |
3111 Label initialize_header; | |
3112 Label initialize_object; // including clearing the fields | |
3113 Label allocate_shared; | |
3114 | |
3115 __ get_cpool_and_tags(rsi, rax); | |
3116 // get instanceKlass | |
3117 __ movq(rsi, Address(rsi, rdx, | |
3118 Address::times_8, sizeof(constantPoolOopDesc))); | |
3119 | |
3120 // make sure the class we're about to instantiate has been | |
3121 // resolved. Note: slow_case does a pop of stack, which is why we | |
3122 // loaded class/pushed above | |
3123 const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize; | |
3124 __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), | |
3125 JVM_CONSTANT_Class); | |
3126 __ jcc(Assembler::notEqual, slow_case); | |
3127 | |
3128 // make sure klass is initialized & doesn't have finalizer | |
3129 // make sure klass is fully initialized | |
3130 __ cmpl(Address(rsi, | |
3131 instanceKlass::init_state_offset_in_bytes() + | |
3132 sizeof(oopDesc)), | |
3133 instanceKlass::fully_initialized); | |
3134 __ jcc(Assembler::notEqual, slow_case); | |
3135 | |
3136 // get instance_size in instanceKlass (scaled to a count of bytes) | |
3137 __ movl(rdx, | |
3138 Address(rsi, | |
3139 Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); | |
3140 // test to see if it has a finalizer or is malformed in some way | |
3141 __ testl(rdx, Klass::_lh_instance_slow_path_bit); | |
3142 __ jcc(Assembler::notZero, slow_case); | |
3143 | |
3144 // Allocate the instance | |
3145 // 1) Try to allocate in the TLAB | |
3146 // 2) if fail and the object is large allocate in the shared Eden | |
3147 // 3) if the above fails (or is not applicable), go to a slow case | |
3148 // (creates a new TLAB, etc.) | |
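// Fast-path sketch of step 1 (TLAB bump-pointer allocation): | |
// obj = thread->tlab_top; new_top = obj + size; | |
// if (new_top > thread->tlab_end) goto allocate_shared/slow_case; | |
// thread->tlab_top = new_top; // no atomics: the TLAB is thread-local | |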
3149 | |
3150 const bool allow_shared_alloc = | |
3151 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; | |
3152 | |
3153 if (UseTLAB) { | |
3154 __ movq(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset()))); | |
3155 __ leaq(rbx, Address(rax, rdx, Address::times_1)); | |
3156 __ cmpq(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset()))); | |
3157 __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case); | |
3158 __ movq(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx); | |
3159 if (ZeroTLAB) { | |
3160 // the fields have been already cleared | |
3161 __ jmp(initialize_header); | |
3162 } else { | |
3163 // initialize both the header and fields | |
3164 __ jmp(initialize_object); | |
3165 } | |
3166 } | |
3167 | |
3168 // Allocation in the shared Eden, if allowed. | |
3169 // | |
3170 // rdx: instance size in bytes | |
3171 if (allow_shared_alloc) { | |
3172 __ bind(allocate_shared); | |
3173 | |
342 | 3174 ExternalAddress top((address)Universe::heap()->top_addr()); |
3175 ExternalAddress end((address)Universe::heap()->end_addr()); |
3176 |
0 | 3177 const Register RtopAddr = rscratch1; |
3178 const Register RendAddr = rscratch2; | |
3179 | |
3180 __ lea(RtopAddr, top); | |
3181 __ lea(RendAddr, end); | |
3182 __ movq(rax, Address(RtopAddr, 0)); | |
3183 | |
3184 // For retries rax gets set by cmpxchgq | |
3185 Label retry; | |
3186 __ bind(retry); | |
3187 __ leaq(rbx, Address(rax, rdx, Address::times_1)); | |
3188 __ cmpq(rbx, Address(RendAddr, 0)); | |
3189 __ jcc(Assembler::above, slow_case); | |
3190 | |
3191 // Compare rax with the current top addr; if still equal, store rbx | |
3192 // (the new top addr) through the top addr pointer. Sets ZF if they were | |
3193 // equal, and clears it otherwise. Use lock prefix for atomicity on MPs. | |
3194 // | |
3195 // rax: object begin | |
3196 // rbx: object end | |
3197 // rdx: instance size in bytes | |
3198 if (os::is_MP()) { | |
3199 __ lock(); | |
3200 } | |
3201 __ cmpxchgq(rbx, Address(RtopAddr, 0)); | |
3202 | |
3203 // if someone beat us on the allocation, try again, otherwise continue | |
3204 __ jcc(Assembler::notEqual, retry); | |
3205 } | |
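// Roughly equivalent to a CAS loop over the global top pointer: | |
// do { old = *top; new = old + size; if (new > *end) goto slow_case; | |
// } while (!CAS(top, old, new)); // obj = old on success | |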
3206 | |
3207 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) { | |
3208 // The object is initialized before the header. If the object size is | |
3209 // zero, go directly to the header initialization. | |
3210 __ bind(initialize_object); | |
3211 __ decrementl(rdx, sizeof(oopDesc)); | |
3212 __ jcc(Assembler::zero, initialize_header); | |
3213 | |
3214 // Initialize object fields | |
3215 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) | |
3216 __ shrl(rdx, LogBytesPerLong); // divide by oopSize to simplify the loop | |
3217 { | |
3218 Label loop; | |
3219 __ bind(loop); | |
3220 __ movq(Address(rax, rdx, Address::times_8, | |
3221 sizeof(oopDesc) - oopSize), | |
3222 rcx); | |
3223 __ decrementl(rdx); | |
3224 __ jcc(Assembler::notZero, loop); | |
3225 } | |
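// The loop above clears one 8-byte word per iteration, walking rdx down | |
// from the last field word to the first word past the header; rcx stays | |
// zero throughout, so every store writes 0. | |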
3226 | |
3227 // initialize object header only. | |
3228 __ bind(initialize_header); | |
3229 if (UseBiasedLocking) { | |
3230 __ movq(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes())); | |
3231 __ movq(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1); | |
3232 } else { | |
3233 __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), | |
3234 (intptr_t) markOopDesc::prototype()); // header (address 0x1) | |
3235 } | |
167 | 3236 __ xorl(rcx, rcx); // use zero reg to clear memory (shorter code) |
3237 __ store_klass_gap(rax, rcx); // zero klass gap for compressed oops |
3238 __ store_klass(rax, rsi); // store klass last |
0 | 3239 __ jmp(done); |
3240 } | |
3241 | |
3242 { | |
3243 SkipIfEqual skip(_masm, &DTraceAllocProbes, false); | |
3244 // Trigger dtrace event for fastpath | |
3245 __ push(atos); // save the return value | |
3246 __ call_VM_leaf( | |
3247 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax); | |
3248 __ pop(atos); // restore the return value | |
3249 } | |
3250 | |
3251 // slow case | |
3252 __ bind(slow_case); | |
3253 __ get_constant_pool(c_rarg1); | |
3254 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); | |
3255 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2); | |
3256 __ verify_oop(rax); | |
3257 | |
3258 // continue | |
3259 __ bind(done); | |
3260 } | |
3261 | |
3262 void TemplateTable::newarray() { | |
3263 transition(itos, atos); | |
3264 __ load_unsigned_byte(c_rarg1, at_bcp(1)); | |
3265 __ movl(c_rarg2, rax); | |
3266 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), | |
3267 c_rarg1, c_rarg2); | |
3268 } | |
3269 | |
3270 void TemplateTable::anewarray() { | |
3271 transition(itos, atos); | |
3272 __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1); | |
3273 __ get_constant_pool(c_rarg1); | |
3274 __ movl(c_rarg3, rax); | |
3275 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), | |
3276 c_rarg1, c_rarg2, c_rarg3); | |
3277 } | |
3278 | |
3279 void TemplateTable::arraylength() { | |
3280 transition(atos, itos); | |
3281 __ null_check(rax, arrayOopDesc::length_offset_in_bytes()); | |
3282 __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes())); | |
3283 } | |
3284 | |
3285 void TemplateTable::checkcast() { | |
3286 transition(atos, atos); | |
3287 Label done, is_null, ok_is_subtype, quicked, resolved; | |
3288 __ testq(rax, rax); // object is in rax | |
3289 __ jcc(Assembler::zero, is_null); | |
3290 | |
3291 // Get cpool & tags index | |
3292 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array | |
3293 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index | |
3294 // See if bytecode has already been quicked | |
3295 __ cmpb(Address(rdx, rbx, | |
3296 Address::times_1, | |
3297 typeArrayOopDesc::header_size(T_BYTE) * wordSize), | |
3298 JVM_CONSTANT_Class); | |
3299 __ jcc(Assembler::equal, quicked); | |
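// If the tag is not JVM_CONSTANT_Class yet, the class is unresolved: | |
// call into the runtime to resolve it and quicken the bytecode, then | |
// rejoin at 'resolved' with the receiver restored. | |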
113 | 3300 __ push(atos); // save receiver for result, and for GC |
0 | 3301 __ movq(r12, rcx); // save rcx XXX |
3302 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); | |
113 | 3303 __ movq(rcx, r12); // restore rcx XXX |
3304 __ reinit_heapbase(); |
0 | 3305 __ pop_ptr(rdx); // restore receiver |
3306 __ jmpb(resolved); | |
3307 | |
3308 // Get superklass in rax and subklass in rbx | |
3309 __ bind(quicked); | |
3310 __ movq(rdx, rax); // Save object in rdx; rax needed for subtype check | |
3311 __ movq(rax, Address(rcx, rbx, | |
3312 Address::times_8, sizeof(constantPoolOopDesc))); | |
3313 | |
3314 __ bind(resolved); | |
113 | 3315 __ load_klass(rbx, rdx); |
0 | 3316 |
3317 // Generate subtype check. Blows rcx, rdi. Object in rdx. | |
3318 // Superklass in rax. Subklass in rbx. | |
3319 __ gen_subtype_check(rbx, ok_is_subtype); | |
3320 | |
3321 // Come here on failure | |
3322 __ push_ptr(rdx); | |
3323 // object is at TOS | |
3324 __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry)); | |
3325 | |
3326 // Come here on success | |
3327 __ bind(ok_is_subtype); | |
3328 __ movq(rax, rdx); // Restore object in rdx | |
3329 | |
3330 // Collect counts on whether this check-cast sees NULLs a lot or not. | |
3331 if (ProfileInterpreter) { | |
3332 __ jmp(done); | |
3333 __ bind(is_null); | |
3334 __ profile_null_seen(rcx); | |
3335 } else { | |
3336 __ bind(is_null); // same as 'done' | |
3337 } | |
3338 __ bind(done); | |
3339 } | |
3340 | |
3341 void TemplateTable::instanceof() { | |
3342 transition(atos, itos); | |
3343 Label done, is_null, ok_is_subtype, quicked, resolved; | |
3344 __ testq(rax, rax); | |
3345 __ jcc(Assembler::zero, is_null); | |
3346 | |
3347 // Get cpool & tags index | |
3348 __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array | |
3349 __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index | |
3350 // See if bytecode has already been quicked | |
3351 __ cmpb(Address(rdx, rbx, | |
3352 Address::times_1, | |
3353 typeArrayOopDesc::header_size(T_BYTE) * wordSize), | |
3354 JVM_CONSTANT_Class); | |
3355 __ jcc(Assembler::equal, quicked); | |
3356 | |
113 | 3357 __ push(atos); // save receiver for result, and for GC |
0 | 3358 __ movq(r12, rcx); // save rcx |
3359 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); | |
113 | 3360 __ movq(rcx, r12); // restore rcx |
3361 __ reinit_heapbase(); |
0 | 3362 __ pop_ptr(rdx); // restore receiver |
113 | 3363 __ load_klass(rdx, rdx); |
0 | 3364 __ jmpb(resolved); |
3365 | |
3366 // Get superklass in rax and subklass in rdx | |
3367 __ bind(quicked); | |
113 | 3368 __ load_klass(rdx, rax); |
0 | 3369 __ movq(rax, Address(rcx, rbx, |
113 | 3370 Address::times_8, sizeof(constantPoolOopDesc))); |
0 | 3371 |
3372 __ bind(resolved); | |
3373 | |
3374 // Generate subtype check. Blows rcx, rdi | |
3375 // Superklass in rax. Subklass in rdx. | |
3376 __ gen_subtype_check(rdx, ok_is_subtype); | |
3377 | |
3378 // Come here on failure | |
3379 __ xorl(rax, rax); | |
3380 __ jmpb(done); | |
3381 // Come here on success | |
3382 __ bind(ok_is_subtype); | |
3383 __ movl(rax, 1); | |
3384 | |
3385 // Collect counts on whether this test sees NULLs a lot or not. | |
3386 if (ProfileInterpreter) { | |
3387 __ jmp(done); | |
3388 __ bind(is_null); | |
3389 __ profile_null_seen(rcx); | |
3390 } else { | |
3391 __ bind(is_null); // same as 'done' | |
3392 } | |
3393 __ bind(done); | |
3394 // rax = 0: obj == NULL or obj is not an instanceof the specified klass | |
3395 // rax = 1: obj != NULL and obj is an instanceof the specified klass | |
3396 } | |
3397 | |
3398 //----------------------------------------------------------------------------- | |
3399 // Breakpoints | |
3400 void TemplateTable::_breakpoint() { | |
3401 // Note: We get here even if we are single stepping. | |
3402 // jbug insists on setting breakpoints at every bytecode | |
3403 // even if we are in single step mode. | |
3404 | |
3405 transition(vtos, vtos); | |
3406 | |
3407 // get the unpatched byte code | |
3408 __ get_method(c_rarg1); | |
3409 __ call_VM(noreg, | |
3410 CAST_FROM_FN_PTR(address, | |
3411 InterpreterRuntime::get_original_bytecode_at), | |
3412 c_rarg1, r13); | |
3413 __ movq(rbx, rax); | |
3414 | |
3415 // post the breakpoint event | |
3416 __ get_method(c_rarg1); | |
3417 __ call_VM(noreg, | |
3418 CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), | |
3419 c_rarg1, r13); | |
3420 | |
3421 // complete the execution of original bytecode | |
3422 __ dispatch_only_normal(vtos); | |
3423 } | |
3424 | |
3425 //----------------------------------------------------------------------------- | |
3426 // Exceptions | |
3427 | |
3428 void TemplateTable::athrow() { | |
3429 transition(atos, vtos); | |
3430 __ null_check(rax); | |
3431 __ jump(ExternalAddress(Interpreter::throw_exception_entry())); | |
3432 } | |
3433 | |
3434 //----------------------------------------------------------------------------- | |
3435 // Synchronization | |
3436 // | |
3437 // Note: monitorenter & exit are symmetric routines; which is reflected | |
3438 // in the assembly code structure as well | |
3439 // | |
3440 // Stack layout: | |
3441 // | |
3442 // [expressions ] <--- rsp = expression stack top | |
3443 // .. | |
3444 // [expressions ] | |
3445 // [monitor entry] <--- monitor block top = expression stack bot | |
3446 // .. | |
3447 // [monitor entry] | |
3448 // [frame data ] <--- monitor block bot | |
3449 // ... | |
3450 // [saved rbp ] <--- rbp | |
3451 void TemplateTable::monitorenter() { | |
3452 transition(atos, vtos); | |
3453 | |
3454 // check for NULL object | |
3455 __ null_check(rax); | |
3456 | |
3457 const Address monitor_block_top( | |
3458 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); | |
3459 const Address monitor_block_bot( | |
3460 rbp, frame::interpreter_frame_initial_sp_offset * wordSize); | |
3461 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
3462 | |
3463 Label allocated; | |
3464 | |
3465 // initialize entry pointer | |
3466 __ xorl(c_rarg1, c_rarg1); // points to free slot or NULL | |
3467 | |
3468 // find a free slot in the monitor block (result in c_rarg1) | |
3469 { | |
3470 Label entry, loop, exit; | |
3471 __ movq(c_rarg3, monitor_block_top); // points to current entry, | |
3472 // starting with top-most entry | |
3473 __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom | |
3474 // of monitor block | |
3475 __ jmpb(entry); | |
3476 | |
3477 __ bind(loop); | |
3478 // check if current entry is used | |
3479 __ cmpq(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int) NULL); | |
3480 // if not used then remember entry in c_rarg1 | |
3481 __ cmovq(Assembler::equal, c_rarg1, c_rarg3); | |
3482 // check if current entry is for same object | |
3483 __ cmpq(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); | |
3484 // if same object then stop searching | |
3485 __ jccb(Assembler::equal, exit); | |
3486 // otherwise advance to next entry | |
3487 __ addq(c_rarg3, entry_size); | |
3488 __ bind(entry); | |
3489 // check if bottom reached | |
3490 __ cmpq(c_rarg3, c_rarg2); | |
3491 // if not at bottom then check this entry | |
3492 __ jcc(Assembler::notEqual, loop); | |
3493 __ bind(exit); | |
3494 } | |
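// Search sketch: for (e = top; e != bot; e += entry_size) { | |
// if (e->obj == NULL) slot = e; // remember a free slot | |
// if (e->obj == rax) break; // existing entry for this object | |
// } // slot (c_rarg1) stays NULL if no free slot was seen | |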
3495 | |
3496 __ testq(c_rarg1, c_rarg1); // check if a slot has been found | |
3497 __ jcc(Assembler::notZero, allocated); // if found, continue with that one | |
3498 | |
3499 // allocate one if there's no free slot | |
3500 { | |
3501 Label entry, loop; | |
3502 // 1. compute new pointers // rsp: old expression stack top | |
3503 __ movq(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom | |
3504 __ subq(rsp, entry_size); // move expression stack top | |
3505 __ subq(c_rarg1, entry_size); // move expression stack bottom | |
3506 __ movq(c_rarg3, rsp); // set start value for copy loop | |
3507 __ movq(monitor_block_bot, c_rarg1); // set new monitor block bottom | |
3508 __ jmp(entry); | |
3509 // 2. move expression stack contents | |
3510 __ bind(loop); | |
3511 __ movq(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack | |
3512 // word from old location | |
3513 __ movq(Address(c_rarg3, 0), c_rarg2); // and store it at new location | |
3514 __ addq(c_rarg3, wordSize); // advance to next word | |
3515 __ bind(entry); | |
3516 __ cmpq(c_rarg3, c_rarg1); // check if bottom reached | |
3517 __ jcc(Assembler::notEqual, loop); // if not at bottom then | |
3518 // copy next word | |
3519 } | |
3520 | |
3521 // call run-time routine | |
3522 // c_rarg1: points to monitor entry | |
3523 __ bind(allocated); | |
3524 | |
3525 // Increment bcp to point to the next bytecode, so exception | |
3526 // handling for async. exceptions works correctly. | |
3527 // The object has already been popped from the stack, so the | |
3528 // expression stack looks correct. | |
3529 __ incrementq(r13); | |
3530 | |
3531 // store object | |
3532 __ movq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax); | |
3533 __ lock_object(c_rarg1); | |
3534 | |
3535 // check to make sure this monitor doesn't cause stack overflow after locking | |
3536 __ save_bcp(); // in case of exception | |
3537 __ generate_stack_overflow_check(0); | |
3538 | |
3539 // The bcp has already been incremented. Just need to dispatch to | |
3540 // next instruction. | |
3541 __ dispatch_next(vtos); | |
3542 } | |
3543 | |
3544 | |
3545 void TemplateTable::monitorexit() { | |
3546 transition(atos, vtos); | |
3547 | |
3548 // check for NULL object | |
3549 __ null_check(rax); | |
3550 | |
3551 const Address monitor_block_top( | |
3552 rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize); | |
3553 const Address monitor_block_bot( | |
3554 rbp, frame::interpreter_frame_initial_sp_offset * wordSize); | |
3555 const int entry_size = frame::interpreter_frame_monitor_size() * wordSize; | |
3556 | |
3557 Label found; | |
3558 | |
3559 // find matching slot | |
3560 { | |
3561 Label entry, loop; | |
3562 __ movq(c_rarg1, monitor_block_top); // points to current entry, | |
3563 // starting with top-most entry | |
3564 __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom | |
3565 // of monitor block | |
3566 __ jmpb(entry); | |
3567 | |
3568 __ bind(loop); | |
3569 // check if current entry is for same object | |
3570 __ cmpq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); | |
3571 // if same object then stop searching | |
3572 __ jcc(Assembler::equal, found); | |
3573 // otherwise advance to next entry | |
3574 __ addq(c_rarg1, entry_size); | |
3575 __ bind(entry); | |
3576 // check if bottom reached | |
3577 __ cmpq(c_rarg1, c_rarg2); | |
3578 // if not at bottom then check this entry | |
3579 __ jcc(Assembler::notEqual, loop); | |
3580 } | |
3581 | |
3582 // error handling. Unlocking was not block-structured | |
3583 __ call_VM(noreg, CAST_FROM_FN_PTR(address, | |
3584 InterpreterRuntime::throw_illegal_monitor_state_exception)); | |
3585 __ should_not_reach_here(); | |
3586 | |
3587 // call run-time routine | |
3588 // c_rarg1: points to monitor entry | |
3589 __ bind(found); | |
3590 __ push_ptr(rax); // make sure object is on stack (contract with oopMaps) | |
3591 __ unlock_object(c_rarg1); | |
3592 __ pop_ptr(rax); // discard object | |
3593 } | |
3594 | |
3595 | |
3596 // Wide instructions | |
3597 void TemplateTable::wide() { | |
3598 transition(vtos, vtos); | |
3599 __ load_unsigned_byte(rbx, at_bcp(1)); | |
3600 __ lea(rscratch1, ExternalAddress((address)Interpreter::_wentry_point)); | |
3601 __ jmp(Address(rscratch1, rbx, Address::times_8)); | |
3602 // Note: the r13 increment step is part of the individual wide | |
3603 // bytecode implementations | |
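// (Interpreter::_wentry_point is assumed to be a 256-entry table of | |
// wide-variant entry points, indexed here by the bytecode after 'wide'.) | |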
3604 } | |
3605 | |
3606 | |
3607 // Multi arrays | |
3608 void TemplateTable::multianewarray() { | |
3609 transition(vtos, atos); | |
3610 __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions | |
3611 // last dim is on top of stack; we want address of first one: | |
3612 // first_addr = last_addr + (ndims - 1) * wordSize | |
3613 if (TaggedStackInterpreter) __ shll(rax, 1); // index*2 | |
3614 __ leaq(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize)); | |
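// Address sketch: rax holds ndims, so the lea computes | |
// c_rarg1 = rsp + ndims*8 - 8, the slot holding the first dimension | |
// (dimensions were pushed in order, so the last one is at rsp); | |
// with TaggedStackInterpreter ndims is doubled first, since each | |
// stack slot then occupies two words. | |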
3615 call_VM(rax, | |
3616 CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), | |
3617 c_rarg1); | |
3618 __ load_unsigned_byte(rbx, at_bcp(3)); | |
3619 if (TaggedStackInterpreter) __ shll(rbx, 1); // index*2 | |
3620 __ leaq(rsp, Address(rsp, rbx, Address::times_8)); | |
3621 } |