src/cpu/sparc/vm/templateTable_sparc.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents (none)
children ba764ed4b6f2
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_templateTable_sparc.cpp.incl"

#ifndef CC_INTERP
#define __ _masm->


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
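
// Added commentary (not in the original source): ccNot returns the negated
// hardware condition because an if_<cond> template falls through into the
// branch-taken path and must jump around it when the bytecode's condition
// fails. A sketch of what if_icmplt effectively emits:
//
//   cmp O1, Otos_i            ; compare the two operands
//   bge not_taken             ; ccNot(less) == greaterEqual
//   ...bump Lbcp by the branch displacement...
//   not_taken:                ; fall through to the next bytecode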

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address( Lbcp, 0, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register Rbyte_code,
                                   Register Rscratch,
                                   bool load_bc_into_scratch /*=true*/) {
  // With sharing on, may need to test methodOop flag.
  if (!RewriteBytecodes) return;
  if (load_bc_into_scratch) __ set(bc, Rbyte_code);
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    __ ldub(at_bcp(0), Rscratch);
    __ cmp(Rscratch, Bytecodes::_breakpoint);
    __ br(Assembler::notEqual, false, Assembler::pt, fast_patch);
    __ delayed()->nop();  // don't bother to hoist the stb here
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, Rbyte_code);
    __ ba(false, patch_done);
    __ delayed()->nop();
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label okay;
  __ ldub(at_bcp(0), Rscratch);
  __ cmp(Rscratch, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed()->cmp(Rscratch, Rbyte_code);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed()->nop();
  __ stop("Rewriting wrong bytecode location");
  __ bind(okay);
#endif
  __ stb(Rbyte_code, at_bcp(0));
  __ bind(patch_done);
}
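
// Added commentary (not in the original source): patch_bytecode is the
// quickening step -- once a template's one-time slow work is done, the
// bytecode at Lbcp is overwritten with a faster variant so later executions
// dispatch straight to the fast template, e.g.
//
//   before:  aload_0  getfield #5        (resolves the field)
//   after:   aload_0  fast_igetfield #5  (uses the resolved cache entry)
//
// If JVMTI breakpoints may be set, the byte at bcp could be _breakpoint, so
// the original bytecode is patched through the VM instead of with a raw stb.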

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  Address a(G3_scratch, (address)p);
  __ sethi(a);
  __ ldf(FloatRegisterImpl::S, a, Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  Address a(G3_scratch, (address)p);
  __ sethi(a);
  __ ldf(FloatRegisterImpl::D, a, Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);
  __ cmp(O2, JVM_CONSTANT_UnresolvedString);    // unresolved string? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClass);     // unresolved class? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClassInError);  // unresolved class in error state
  __ brx(Assembler::equal, true, Assembler::pn, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_Class);               // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ ld_ptr(O0, O1, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}
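
// Added summary (not in the original source) of the tag dispatch above:
//
//   JVM_CONSTANT_UnresolvedString       -> call InterpreterRuntime::ldc
//   JVM_CONSTANT_UnresolvedClass        -> call InterpreterRuntime::ldc
//   JVM_CONSTANT_UnresolvedClassInError -> call InterpreterRuntime::ldc
//   JVM_CONSTANT_Class                  -> call InterpreterRuntime::ldc (java mirror)
//   JVM_CONSTANT_Integer                -> ld from the pool, push itos
//   JVM_CONSTANT_String                 -> ld_ptr from the pool, push atos
//   otherwise (a float constant)        -> ldf from the pool, push ftos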

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label retry, resolved, Long, exit;

  __ bind(retry);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp(O2, JVM_CONSTANT_Double);
  __ brx(Assembler::notEqual, false, Assembler::pt, Long);
  __ delayed()->nop();
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also constantPoolOopDesc::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}


void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp(G3_scratch, (int)Bytecodes::_iload);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}
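
// Added worked example (not in the original source) of the rewriting above
// for the pair "iload a; iload b" followed by a non-iload bytecode:
//
//   pass 1: iload a sees _iload next       -> no rewrite (wait)
//           iload b sees neither           -> becomes fast_iload b
//   pass 2: iload a sees _fast_iload next  -> becomes fast_iload2 a
//   steady state: fast_iload2 performs both loads in a single dispatch.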

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerWord, G3_scratch, O3);
  __ ld_ptr(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  debug_only(__ verify_local_tag(frame::TagValue, Llocals, Otos_i, n));
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, Otos_l, n));
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  debug_only(__ verify_local_tag(frame::TagValue, Llocals, G3_scratch, n));
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  debug_only(__ verify_local_tag(frame::TagCategory2, Llocals, G3_scratch, n));
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  debug_only(__ verify_local_tag(frame::TagReference, Llocals, Otos_i, n));
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp(G3_scratch, (int)Bytecodes::_getfield);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}
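
// Added example (not in the original source): with RewriteFrequentPairs,
// "aload_0; getfield #f" whose getfield has already been quickened to
// _fast_igetfield is rewritten to the superbytecode _fast_iaccess_0, which
// loads 'this' and fetches the int field in one template. An aload_0
// followed by none of the fast getfields becomes _fast_aload_0, which skips
// this pair check on later executions.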


void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  // astore tos can also be a returnAddress, so load and store the tag too
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ inc(Lesp, Interpreter::stackElementSize());
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  // astore tos can also be a returnAddress, so load and store the tag too
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ inc(Lesp, Interpreter::stackElementSize());
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr( G3_scratch, Otos_i, Otos_l2 );
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null( Otos_i, false, Assembler::pn, is_null );
  __ delayed()->
    ld_ptr(O3, oopDesc::klass_offset_in_bytes(), O4);  // get array klass

  // do fast instanceof cache test
  __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O5);  // get value klass

  __ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i: value
  // O1: addr - offset
  // O2: index
  // O3: array
  // O4: array element klass
  // O5: value klass

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
  __ bind(store_ok);
  __ st_ptr(Otos_i, O1, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  // Quote from rememberedSet.hpp: For objArrays, the precise card
  // corresponding to the pointer store is dirtied so we don't need to
  // scavenge the entire array.
  Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  __ add(element, O1);  // address the element precisely
  __ store_check(G3_scratch, O1);
  __ ba(false, done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize());  // adj sp (pops array, index and value)

  __ bind(is_null);
  __ st_ptr(Otos_i, element);
  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize());  // adj sp (pops array, index and value)
  __ bind(done);
}
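
// Added note (not in the original source): conceptually, store_check dirties
// the card covering the element address, roughly
//
//   card_table[element_addr >> card_shift] = dirty;
//
// Passing the precise element address (O1) rather than the array base means
// only the card for this slot is dirtied, per the rememberedSet.hpp quote.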


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ tag_local(frame::TagValue, Llocals, Otos_i, n);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ tag_local(frame::TagValue, Llocals, Otos_l, n);
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ tag_local(frame::TagCategory2, Llocals, Otos_l, n);
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  // astore tos can also be a returnAddress, so load and store the tag too
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ inc(Lesp, Interpreter::stackElementSize());
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr( n, Otos_i, Otos_l2 );
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize());
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize());
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr_and_tag(0, Otos_i, Otos_l2);
  __ push_ptr(Otos_i, Otos_l2);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);   // get a
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);         // get b
  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);        // put b
  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);  // put a - like swap
  __ push_ptr(Otos_l1, Otos_l2);                    // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);   // get c
  __ push_ptr(G3_scratch, G4_scratch);              // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);         // get a
  __ store_ptr_and_tag(3, G3_scratch, G4_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);   // get b
  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);        // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);  // get a
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);        // get b
  __ push_ptr(G3_scratch, G4_scratch);             // push a
  __ push_ptr(Otos_l1, Otos_l2);                   // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr_and_tag(1, Lscratch, G1_scratch);     // get b
  __ load_ptr_and_tag(2, Otos_l1, Otos_l2);         // get a
  __ store_ptr_and_tag(2, Lscratch, G1_scratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr_and_tag(0, G3_scratch, G4_scratch);   // get c
  __ store_ptr_and_tag(1, G3_scratch, G4_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr_and_tag(0, Otos_l1, Otos_l2);        // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch, G1_scratch);                // push b
  __ push_ptr(G3_scratch, G4_scratch);              // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (longs and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr_and_tag(1, Lscratch, G1_scratch);     // get c
  __ load_ptr_and_tag(3, Otos_l1, Otos_l2);         // get a
  __ store_ptr_and_tag(3, Lscratch, G1_scratch);    // put c at 3
  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);        // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr_and_tag(2, G3_scratch, G4_scratch);   // get b
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);         // get d
  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);  // put b at 0
  __ store_ptr_and_tag(2, Otos_l1, Otos_l2);        // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch, G1_scratch);                // push c
  __ push_ptr(Otos_l1, Otos_l2);                    // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr_and_tag(1, G3_scratch, G4_scratch);   // get a
  __ load_ptr_and_tag(0, Otos_l1, Otos_l2);         // get b
  __ store_ptr_and_tag(0, G3_scratch, G4_scratch);  // put b
  __ store_ptr_and_tag(1, Otos_l1, Otos_l2);        // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __ add(O1, Otos_i, Otos_i);   break;
   case  sub:  __ sub(O1, Otos_i, Otos_i);   break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __ or3(O1, Otos_i, Otos_i);   break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __ sll(O1, Otos_i, Otos_i);   break;
   case  shr:  __ sra(O1, Otos_i, Otos_i);   break;
   case ushr:  __ srl(O1, Otos_i, Otos_i);   break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __ add(O2, Otos_l, Otos_l);   break;
   case  sub:  __ sub(O2, Otos_l, Otos_l);   break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __ or3(O2, Otos_l, Otos_l);   break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __ and3(O3, Otos_l2, Otos_l2);   __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __ or3(O3, Otos_l2, Otos_l2);    __ or3(O2, Otos_l1, Otos_l1);   break;
   case _xor:  __ xor3(O3, Otos_l2, Otos_l2);   __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}


void TemplateTable::idiv() {
  // %%%%% Later: For SPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1);  // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i);  // note: irem uses O1 after this instruction!
  __ bind(done);
}
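
// Added rationale (not in the original source): the JVM spec requires the
// overflow case to wrap,
//
//   min_int / -1 == min_int    // 0x80000000 / -1
//
// so divisor -1 with dividend min_int is special-cased to return the
// dividend unchanged rather than relying on sdiv's overflow behavior.
// A zero divisor throws ArithmeticException via throw_if_not_icc above.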


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos);  // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}



void TemplateTable::lushr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
     __ pop_d( F0 );
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  // v8 has fnegd if source and dest are the same
  __ fneg(FloatRegisterImpl::D, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());  // access_local_int puts E.A. in G3_scratch
}
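
// Added example (not in the original source): the operand is a signed byte,
// so "iinc 4, -1" decrements local #4. access_local_int leaves the local's
// effective address in G3_scratch, which is why the sum can be stored back
// through it directly.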


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, Interpreter::value_offset_in_bytes());  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (VM_Version::v9_instructions_work()) {
      if (bytecode() == Bytecodes::_l2f) {
        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
      } else {
        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
      }
    } else {
      __ call_VM_leaf(
        Lscratch,
        bytecode() == Bytecodes::_l2f
          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
      );
    }
    break;

   case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      // According to the v8 manual, you have to have a non-fp instruction
      // between fcmp and fb.
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);  // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    if (VM_Version::v9_instructions_work()) {
      __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    }
    else {
      // must uncache tos
      __ push_d();
      __ pop_i(O0);
      __ pop_i(O1);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
    }
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1);  // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2);  // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0  &&  Ftos_d == F0,  "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}
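
// Added reminder (not in the original source): unordered_result encodes the
// fcmpl/fcmpg distinction from the JVM spec -- fcmpl/dcmpl pass -1 (NaN
// compares as "less"), fcmpg/dcmpg pass +1 (NaN compares as "greater");
// otherwise the pushed result is -1, 0, or 1.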

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_oop(Lmethod);
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i, G0);  // push ptr sized thing plus 0 for tag.
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register O0_cur_bcp = O0;
  __ mov( Lbcp, O0_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );  // add to bc addr

    // Update Backedge branch separately from invocations
    const Register G4_invoke_ctr = G4;
    __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
    if (ProfileInterpreter) {
      __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
      if (UseOnStackReplacement) {
        __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
      }
    } else {
      if (UseOnStackReplacement) {
        __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp );  // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}



void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set (65536, G3_scratch);
    __ cmp (Otos_i, G3_scratch);
    __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind (zzz);
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);  // low bound
  __ ld(O1, 2 * BytesPerInt, O3);  // high bound
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(false, continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2);  // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}
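
// Added layout sketch (not in the original source): after Lbcp is rounded up
// to a 4-byte boundary (the add/and3 above), O1 points at
//
//   [ default ][ lo ][ hi ][ offset(lo) ... offset(hi) ]
//       0        4     8     12 + (key - lo) * 4         (byte offsets)
//
// which matches the bounds check and the dispatch-offset computation above.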


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2);  // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);     // set first pair addr
  __ ba(false, loop_entry);
  __ delayed()->add(O3, O2, O2);       // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4);  // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4);  // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba(false, continue_execution);
    __ delayed()->nop();
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    __ sub(O3, O1, O3);
    __ sub(O3, 2*BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3);  // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos);
}
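
// Added layout sketch (not in the original source): a lookupswitch rewritten
// to fast_linearswitch keeps the standard operand layout,
//
//   [ default offset ][ npairs ][ match0, offset0 ][ match1, offset1 ] ...
//
// and the loop above scans the (match, offset) pairs linearly, loading each
// candidate match into O4 in the branch delay slot.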
1707
1708
1709 void TemplateTable::fast_binaryswitch() {
1710 transition(itos, vtos);
1711 // Implementation using the following core algorithm: (copied from Intel)
1712 //
1713 // int binary_search(int key, LookupswitchPair* array, int n) {
1714 // // Binary search according to "Methodik des Programmierens" by
1715 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1716 // int i = 0;
1717 // int j = n;
1718 // while (i+1 < j) {
1719 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1720 // // with Q: for all i: 0 <= i < n: key < a[i]
1721 // // where a stands for the array and assuming that the (inexisting)
1722 // // element a[n] is infinitely big.
1723 // int h = (i + j) >> 1;
1724 // // i < h < j
1725 // if (key < array[h].fast_match()) {
1726 // j = h;
1727 // } else {
1728 // i = h;
1729 // }
1730 // }
1731 // // R: a[i] <= key < a[i+1] or Q
1732 // // (i.e., if key is within array, i is the correct index)
1733 // return i;
1734 // }
1735
1736 // register allocation
1737 assert(Otos_i == O0, "alias checking");
1738 const Register Rkey = Otos_i; // already set (tosca)
1739 const Register Rarray = O1;
1740 const Register Ri = O2;
1741 const Register Rj = O3;
1742 const Register Rh = O4;
1743 const Register Rscratch = O5;
1744
1745 const int log_entry_size = 3;
1746 const int entry_size = 1 << log_entry_size;
1747
1748 Label found;
1749 // Find Array start
1750 __ add(Lbcp, 3 * BytesPerInt, Rarray);
1751 __ and3(Rarray, -BytesPerInt, Rarray);
1752 // initialize i & j (in delay slot)
1753 __ clr( Ri );
1754
1755 // and start
1756 Label entry;
1757 __ ba(false, entry);
1758 __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1759 // (Rj is already in the native byte-ordering.)
1760
1761 // binary search loop
1762 { Label loop;
1763 __ bind( loop );
1764 // int h = (i + j) >> 1;
1765 __ sra( Rh, 1, Rh );
1766 // if (key < array[h].fast_match()) {
1767 // j = h;
1768 // } else {
1769 // i = h;
1770 // }
1771 __ sll( Rh, log_entry_size, Rscratch );
1772 __ ld( Rarray, Rscratch, Rscratch );
1773 // (Rscratch is already in the native byte-ordering.)
1774 __ cmp( Rkey, Rscratch );
1775 if ( VM_Version::v9_instructions_work() ) {
1776 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1777 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1778 }
1779 else {
1780 Label end_of_if;
1781 __ br( Assembler::less, true, Assembler::pt, end_of_if );
1782 __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
1783 __ mov( Rh, Ri ); // else i = h
1784 __ bind(end_of_if); // }
1785 }
1786
1787 // while (i+1 < j)
1788 __ bind( entry );
1789 __ add( Ri, 1, Rscratch );
1790 __ cmp(Rscratch, Rj);
1791 __ br( Assembler::less, true, Assembler::pt, loop );
1792 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j; the sra at the loop head finishes h = (i + j) >> 1
1793 }
1794
1795 // end of binary search, result index is i (must check again!)
1796 Label default_case;
1797 Label continue_execution;
1798 if (ProfileInterpreter) {
1799 __ mov( Ri, Rh ); // Save index in i for profiling
1800 }
1801 __ sll( Ri, log_entry_size, Ri );
1802 __ ld( Rarray, Ri, Rscratch );
1803 // (Rscratch is already in the native byte-ordering.)
1804 __ cmp( Rkey, Rscratch );
1805 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1806 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1807
1808 // entry found -> j = offset
1809 __ inc( Ri, BytesPerInt );
1810 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1811 __ ld( Rarray, Ri, Rj );
1812 // (Rj is already in the native byte-ordering.)
1813
1814 if (ProfileInterpreter) {
1815 __ ba(false, continue_execution);
1816 __ delayed()->nop();
1817 }
1818
1819 __ bind(default_case); // fall through (if not profiling)
1820 __ profile_switch_default(Ri);
1821
1822 __ bind(continue_execution);
1823 __ add( Lbcp, Rj, Lbcp );
1824 __ dispatch_next( vtos );
1825 }
1826
1827
1828 void TemplateTable::_return(TosState state) {
1829 transition(state, state);
1830 assert(_desc->calls_vm(), "inconsistent calls_vm information");
1831
1832 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1833 assert(state == vtos, "only valid state");
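    // Local 0 holds the receiver; it needs to be registered for finalization
    // only if its klass has JVM_ACC_HAS_FINALIZER set, which is tested below.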
1834 __ mov(G0, G3_scratch);
1835 __ access_local_ptr(G3_scratch, Otos_i);
1836 __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), O2);
1837 __ set(JVM_ACC_HAS_FINALIZER, G3);
1838 __ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
1839 __ andcc(G3, O2, G0);
1840 Label skip_register_finalizer;
1841 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
1842 __ delayed()->nop();
1843
1844 // Call out to do finalizer registration
1845 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
1846
1847 __ bind(skip_register_finalizer);
1848 }
1849
1850 __ remove_activation(state, /* throw_monitor_exception */ true);
1851
1852 // The caller's SP was adjusted upon method entry to accommodate
1853 // the callee's non-argument locals. Undo that adjustment.
1854 __ ret(); // return to caller
1855 __ delayed()->restore(I5_savedSP, G0, SP);
1856 }
1857
1858
1859 // ----------------------------------------------------------------------------
1860 // Volatile variables demand their effects be made known to all CPUs in
1861 // order. Store buffers on most chips allow reads & writes to reorder; the
1862 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1863 // memory barrier (i.e., it's not sufficient that the interpreter does not
1864 // reorder volatile references, the hardware also must not reorder them).
1865 //
1866 // According to the new Java Memory Model (JMM):
1867 // (1) All volatiles are serialized with respect to each other.
1868 // ALSO reads & writes act as acquire & release, so:
1869 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1870 // the read float up to before the read. It's OK for non-volatile memory refs
1871 // that happen before the volatile read to float down below it.
1872 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1873 // that happen BEFORE the write float down to after the write. It's OK for
1874 // non-volatile memory refs that happen after the volatile write to float up
1875 // before it.
1876 //
1877 // We only put in barriers around volatile refs (they are expensive), not
1878 // _between_ memory refs (that would require us to track the flavor of the
1879 // previous memory refs). Requirements (2) and (3) require some barriers
1880 // before volatile stores and after volatile loads. These nearly cover
1881 // requirement (1) but miss the volatile-store-volatile-load case. This final
1882 // case is placed after volatile-stores although it could just as well go
1883 // before volatile-loads.
1884 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
1885 // Helper function to insert an is-volatile test and memory barrier
1886 // All current sparc implementations run in TSO, needing only StoreLoad
1887 if ((order_constraint & Assembler::StoreLoad) == 0) return;
1888 __ membar( order_constraint );
1889 }
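
// (Illustrative only -- the barrier placement implied by the rules above, in
//  pseudo-assembly; under TSO everything but the #StoreLoad is a no-op:
//
//    ld  [x], r                         // volatile read
//    membar #LoadLoad | #LoadStore      // "acquire", covers rule (2)
//    ...
//    membar #LoadStore | #StoreStore    // "release", covers rule (3)
//    st  r, [y]                         // volatile write
//    membar #StoreLoad                  // covers volatile-store-volatile-load
//  )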
1890
1891 // ----------------------------------------------------------------------------
1892 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
1893 assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
1894 // Depends on cpCacheOop layout!
1895 const int shift_count = (1 + byte_no)*BitsPerByte;
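  // shift_count is 16 for byte_no 1 and 24 for byte_no 2, selecting the
  // cached bytecode byte out of the indices word before the 0xFF mask below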
1896 Label resolved;
1897
1898 __ get_cache_and_index_at_bcp(Rcache, index, 1);
1899 __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() +
1900 ConstantPoolCacheEntry::indices_offset())), Lbyte_code);
1901
1902 __ srl( Lbyte_code, shift_count, Lbyte_code );
1903 __ and3( Lbyte_code, 0xFF, Lbyte_code );
1904 __ cmp( Lbyte_code, (int)bytecode());
1905 __ br( Assembler::equal, false, Assembler::pt, resolved);
1906 __ delayed()->set((int)bytecode(), O1);
1907
1908 address entry;
1909 switch (bytecode()) {
1910 case Bytecodes::_getstatic : // fall through
1911 case Bytecodes::_putstatic : // fall through
1912 case Bytecodes::_getfield : // fall through
1913 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
1914 case Bytecodes::_invokevirtual : // fall through
1915 case Bytecodes::_invokespecial : // fall through
1916 case Bytecodes::_invokestatic : // fall through
1917 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
1918 default : ShouldNotReachHere(); break;
1919 }
1920 // first time invocation - must resolve first
1921 __ call_VM(noreg, entry, O1);
1922 // Update registers with resolved info
1923 __ get_cache_and_index_at_bcp(Rcache, index, 1);
1924 __ bind(resolved);
1925 }
1926
1927 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
1928 Register Rmethod,
1929 Register Ritable_index,
1930 Register Rflags,
1931 bool is_invokevirtual,
1932 bool is_invokevfinal) {
1933 // Uses both G3_scratch and G4_scratch
1934 Register Rcache = G3_scratch;
1935 Register Rscratch = G4_scratch;
1936 assert_different_registers(Rcache, Rmethod, Ritable_index);
1937
1938 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
1939
1940 // determine constant pool cache field offsets
1941 const int method_offset = in_bytes(
1942 cp_base_offset +
1943 (is_invokevirtual
1944 ? ConstantPoolCacheEntry::f2_offset()
1945 : ConstantPoolCacheEntry::f1_offset()
1946 )
1947 );
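  // (A sketch of the layout this relies on: for invokevirtual the f2 word is
  //  used -- it holds either a vtable index or, for a vfinal method, the
  //  methodOop itself; the other invokes keep the methodOop in f1.)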
1948 const int flags_offset = in_bytes(cp_base_offset +
1949 ConstantPoolCacheEntry::flags_offset());
1950 // access constant pool cache fields
1951 const int index_offset = in_bytes(cp_base_offset +
1952 ConstantPoolCacheEntry::f2_offset());
1953
1954 if (is_invokevfinal) {
1955 __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
1956 } else {
1957 resolve_cache_and_index(byte_no, Rcache, Rscratch);
1958 }
1959
1960 __ ld_ptr(Address(Rcache, 0, method_offset), Rmethod);
1961 if (Ritable_index != noreg) {
1962 __ ld_ptr(Address(Rcache, 0, index_offset), Ritable_index);
1963 }
1964 __ ld_ptr(Address(Rcache, 0, flags_offset), Rflags);
1965 }
1966
1967 // The Rcache register must be set before call
1968 void TemplateTable::load_field_cp_cache_entry(Register Robj,
1969 Register Rcache,
1970 Register index,
1971 Register Roffset,
1972 Register Rflags,
1973 bool is_static) {
1974 assert_different_registers(Rcache, Rflags, Roffset);
1975
1976 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
1977
1978 __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
1979 ConstantPoolCacheEntry::flags_offset())), Rflags);
1980 __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
1981 ConstantPoolCacheEntry::f2_offset())), Roffset);
1982 if (is_static) {
1983 __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
1984 ConstantPoolCacheEntry::f1_offset())), Robj);
1985 }
1986 }
1987
1988 // The registers Rcache and index are expected to be set before the call.
1989 // Correct values of the Rcache and index registers are preserved.
1990 void TemplateTable::jvmti_post_field_access(Register Rcache,
1991 Register index,
1992 bool is_static,
1993 bool has_tos) {
1994 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
1995
1996 if (JvmtiExport::can_post_field_access()) {
1997 // Check to see if a field access watch has been set before we take
1998 // the time to call into the VM.
1999 Label Label1;
2000 assert_different_registers(Rcache, index, G1_scratch);
2001 Address get_field_access_count_addr(G1_scratch,
2002 (address)JvmtiExport::get_field_access_count_addr(),
2003 relocInfo::none);
2004 __ load_contents(get_field_access_count_addr, G1_scratch);
2005 __ tst(G1_scratch);
2006 __ br(Assembler::zero, false, Assembler::pt, Label1);
2007 __ delayed()->nop();
2008
2009 __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2010
2011 if (is_static) {
2012 __ clr(Otos_i);
2013 } else {
2014 if (has_tos) {
2015 // save object pointer before call_VM() clobbers it
2016 __ mov(Otos_i, Lscratch);
2017 } else {
2018 // Load top of stack (do not pop the value off the stack);
2019 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2020 }
2021 __ verify_oop(Otos_i);
2022 }
2023 // Otos_i: object pointer or NULL if static
2024 // Rcache: cache entry pointer
2025 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2026 Otos_i, Rcache);
2027 if (!is_static && has_tos) {
2028 __ mov(Lscratch, Otos_i); // restore object pointer
2029 __ verify_oop(Otos_i);
2030 }
2031 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2032 __ bind(Label1);
2033 }
2034 }
2035
2036 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2037 transition(vtos, vtos);
2038
2039 Register Rcache = G3_scratch;
2040 Register index = G4_scratch;
2041 Register Rclass = Rcache;
2042 Register Roffset= G4_scratch;
2043 Register Rflags = G1_scratch;
2044 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2045
2046 resolve_cache_and_index(byte_no, Rcache, index);
2047 jvmti_post_field_access(Rcache, index, is_static, false);
2048 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2049
2050 if (!is_static) {
2051 pop_and_check_object(Rclass);
2052 } else {
2053 __ verify_oop(Rclass);
2054 }
2055
2056 Label exit;
2057
2058 Assembler::Membar_mask_bits membar_bits =
2059 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2060
2061 if (__ membar_has_effect(membar_bits)) {
2062 // Get volatile flag
2063 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2064 __ and3(Rflags, Lscratch, Lscratch);
2065 }
2066
2067 Label checkVolatile;
2068
2069 // compute field type
2070 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2071 __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2072 // Make sure we don't need to mask Rflags for tosBits after the above shift
2073 ConstantPoolCacheEntry::verify_tosBits();
2074
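  // The type dispatch below tests one tos state per branch; each branch's
  // delay slot pre-loads the compare for the next test, which is what the
  // commented-out "// cmp(Rflags, ...)" reminders record.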
2075 // Check atos before itos for getstatic, since atos is more likely (in Queens at least)
2076 __ cmp(Rflags, atos);
2077 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2078 __ delayed() ->cmp(Rflags, itos);
2079
2080 // atos
2081 __ ld_ptr(Rclass, Roffset, Otos_i);
2082 __ verify_oop(Otos_i);
2083 __ push(atos);
2084 if (!is_static) {
2085 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2086 }
2087 __ ba(false, checkVolatile);
2088 __ delayed()->tst(Lscratch);
2089
2090 __ bind(notObj);
2091
2092 // cmp(Rflags, itos);
2093 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2094 __ delayed() ->cmp(Rflags, ltos);
2095
2096 // itos
2097 __ ld(Rclass, Roffset, Otos_i);
2098 __ push(itos);
2099 if (!is_static) {
2100 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2101 }
2102 __ ba(false, checkVolatile);
2103 __ delayed()->tst(Lscratch);
2104
2105 __ bind(notInt);
2106
2107 // cmp(Rflags, ltos);
2108 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2109 __ delayed() ->cmp(Rflags, btos);
2110
2111 // ltos
2112 // load must be atomic
2113 __ ld_long(Rclass, Roffset, Otos_l);
2114 __ push(ltos);
2115 if (!is_static) {
2116 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2117 }
2118 __ ba(false, checkVolatile);
2119 __ delayed()->tst(Lscratch);
2120
2121 __ bind(notLong);
2122
2123 // cmp(Rflags, btos);
2124 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2125 __ delayed() ->cmp(Rflags, ctos);
2126
2127 // btos
2128 __ ldsb(Rclass, Roffset, Otos_i);
2129 __ push(itos);
2130 if (!is_static) {
2131 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2132 }
2133 __ ba(false, checkVolatile);
2134 __ delayed()->tst(Lscratch);
2135
2136 __ bind(notByte);
2137
2138 // cmp(Rflags, ctos);
2139 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2140 __ delayed() ->cmp(Rflags, stos);
2141
2142 // ctos
2143 __ lduh(Rclass, Roffset, Otos_i);
2144 __ push(itos);
2145 if (!is_static) {
2146 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2147 }
2148 __ ba(false, checkVolatile);
2149 __ delayed()->tst(Lscratch);
2150
2151 __ bind(notChar);
2152
2153 // cmp(Rflags, stos);
2154 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2155 __ delayed() ->cmp(Rflags, ftos);
2156
2157 // stos
2158 __ ldsh(Rclass, Roffset, Otos_i);
2159 __ push(itos);
2160 if (!is_static) {
2161 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2162 }
2163 __ ba(false, checkVolatile);
2164 __ delayed()->tst(Lscratch);
2165
2166 __ bind(notShort);
2167
2168
2169 // cmp(Rflags, ftos);
2170 __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2171 __ delayed() ->tst(Lscratch);
2172
2173 // ftos
2174 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2175 __ push(ftos);
2176 if (!is_static) {
2177 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2178 }
2179 __ ba(false, checkVolatile);
2180 __ delayed()->tst(Lscratch);
2181
2182 __ bind(notFloat);
2183
2184
2185 // dtos
2186 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2187 __ push(dtos);
2188 if (!is_static) {
2189 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2190 }
2191
2192 __ bind(checkVolatile);
2193 if (__ membar_has_effect(membar_bits)) {
2194 // __ tst(Lscratch); executed in delay slot
2195 __ br(Assembler::zero, false, Assembler::pt, exit);
2196 __ delayed()->nop();
2197 volatile_barrier(membar_bits);
2198 }
2199
2200 __ bind(exit);
2201 }
2202
2203
2204 void TemplateTable::getfield(int byte_no) {
2205 getfield_or_static(byte_no, false);
2206 }
2207
2208 void TemplateTable::getstatic(int byte_no) {
2209 getfield_or_static(byte_no, true);
2210 }
2211
2212
2213 void TemplateTable::fast_accessfield(TosState state) {
2214 transition(atos, state);
2215 Register Rcache = G3_scratch;
2216 Register index = G4_scratch;
2217 Register Roffset = G4_scratch;
2218 Register Rflags = Rcache;
2219 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2220
2221 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2222 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2223
2224 __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Roffset);
2225
2226 __ null_check(Otos_i);
2227 __ verify_oop(Otos_i);
2228
2229 Label exit;
2230
2231 Assembler::Membar_mask_bits membar_bits =
2232 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2233 if (__ membar_has_effect(membar_bits)) {
2234 // Get volatile flag
2235 __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())), Rflags);
2236 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2237 }
2238
2239 switch (bytecode()) {
2240 case Bytecodes::_fast_bgetfield:
2241 __ ldsb(Otos_i, Roffset, Otos_i);
2242 break;
2243 case Bytecodes::_fast_cgetfield:
2244 __ lduh(Otos_i, Roffset, Otos_i);
2245 break;
2246 case Bytecodes::_fast_sgetfield:
2247 __ ldsh(Otos_i, Roffset, Otos_i);
2248 break;
2249 case Bytecodes::_fast_igetfield:
2250 __ ld(Otos_i, Roffset, Otos_i);
2251 break;
2252 case Bytecodes::_fast_lgetfield:
2253 __ ld_long(Otos_i, Roffset, Otos_l);
2254 break;
2255 case Bytecodes::_fast_fgetfield:
2256 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2257 break;
2258 case Bytecodes::_fast_dgetfield:
2259 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2260 break;
2261 case Bytecodes::_fast_agetfield:
2262 __ ld_ptr(Otos_i, Roffset, Otos_i);
2263 break;
2264 default:
2265 ShouldNotReachHere();
2266 }
2267
2268 if (__ membar_has_effect(membar_bits)) {
2269 __ btst(Lscratch, Rflags);
2270 __ br(Assembler::zero, false, Assembler::pt, exit);
2271 __ delayed()->nop();
2272 volatile_barrier(membar_bits);
2273 __ bind(exit);
2274 }
2275
2276 if (state == atos) {
2277 __ verify_oop(Otos_i); // does not blow flags!
2278 }
2279 }
2280
2281 void TemplateTable::jvmti_post_fast_field_mod() {
2282 if (JvmtiExport::can_post_field_modification()) {
2283 // Check to see if a field modification watch has been set before we take
2284 // the time to call into the VM.
2285 Label done;
2286 Address get_field_modification_count_addr(G4_scratch, (address)JvmtiExport::get_field_modification_count_addr(), relocInfo::none);
2287 __ load_contents(get_field_modification_count_addr, G4_scratch);
2288 __ tst(G4_scratch);
2289 __ br(Assembler::zero, false, Assembler::pt, done);
2290 __ delayed()->nop();
2291 __ pop_ptr(G4_scratch); // copy the object pointer from tos
2292 __ verify_oop(G4_scratch);
2293 __ push_ptr(G4_scratch); // put the object pointer back on tos
2294 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2295 // Save tos values before call_VM() clobbers them. Since we have
2296 // to do it for every data type, we use the saved values as the
2297 // jvalue object.
2298 switch (bytecode()) { // save tos values before call_VM() clobbers them
2299 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2300 case Bytecodes::_fast_bputfield: // fall through
2301 case Bytecodes::_fast_sputfield: // fall through
2302 case Bytecodes::_fast_cputfield: // fall through
2303 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2304 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2305 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2306 // get words in right order for use as jvalue object
2307 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2308 }
2309 // setup pointer to jvalue object
2310 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize);
2311 // G4_scratch: object pointer
2312 // G1_scratch: cache entry pointer
2313 // G3_scratch: jvalue object on the stack
2314 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2315 switch (bytecode()) { // restore tos values
2316 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2317 case Bytecodes::_fast_bputfield: // fall through
2318 case Bytecodes::_fast_sputfield: // fall through
2319 case Bytecodes::_fast_cputfield: // fall through
2320 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2321 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2322 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2323 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2324 }
2325 __ bind(done);
2326 }
2327 }
2328
2329 // The registers Rcache and index are expected to be set before the call.
2330 // The function may destroy various registers, just not the Rcache and index registers.
2331 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2332 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2333
2334 if (JvmtiExport::can_post_field_modification()) {
2335 // Check to see if a field modification watch has been set before we take
2336 // the time to call into the VM.
2337 Label Label1;
2338 assert_different_registers(Rcache, index, G1_scratch);
2339 Address get_field_modification_count_addr(G1_scratch,
2340 (address)JvmtiExport::get_field_modification_count_addr(),
2341 relocInfo::none);
2342 __ load_contents(get_field_modification_count_addr, G1_scratch);
2343 __ tst(G1_scratch);
2344 __ br(Assembler::zero, false, Assembler::pt, Label1);
2345 __ delayed()->nop();
2346
2347 // The Rcache and index registers have already been set.
2348 // That would allow the call below to be eliminated, but then the Rcache
2349 // and index registers would have to be used consistently after this line.
2350 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2351
2352 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2353 if (is_static) {
2354 // Life is simple. Null out the object pointer.
2355 __ clr(G4_scratch);
2356 } else {
2357 Register Rflags = G1_scratch;
2358 // Life is harder. The stack holds the value on top, followed by the
2359 // object. We don't know the size of the value, though; it could be
2360 // one or two words depending on its type. As a result, we must find
2361 // the type to determine where the object is.
2362
2363 Label two_word, valsizeknown;
2364 __ ld_ptr(Address(G1_scratch, 0, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())), Rflags);
2365 __ mov(Lesp, G4_scratch);
2366 __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2367 // Make sure we don't need to mask Rflags for tosBits after the above shift
2368 ConstantPoolCacheEntry::verify_tosBits();
2369 __ cmp(Rflags, ltos);
2370 __ br(Assembler::equal, false, Assembler::pt, two_word);
2371 __ delayed()->cmp(Rflags, dtos);
2372 __ br(Assembler::equal, false, Assembler::pt, two_word);
2373 __ delayed()->nop();
2374 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2375 __ br(Assembler::always, false, Assembler::pt, valsizeknown);
2376 __ delayed()->nop();
2377 __ bind(two_word);
2378
2379 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2380
2381 __ bind(valsizeknown);
2382 // setup object pointer
2383 __ ld_ptr(G4_scratch, 0, G4_scratch);
2384 __ verify_oop(G4_scratch);
2385 }
2386 // setup pointer to jvalue object
2387 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2388 // G4_scratch: object pointer or NULL if static
2389 // G3_scratch: cache entry pointer
2390 // G1_scratch: jvalue object on the stack
2391 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2392 G4_scratch, G3_scratch, G1_scratch);
2393 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2394 __ bind(Label1);
2395 }
2396 }
2397
2398 void TemplateTable::pop_and_check_object(Register r) {
2399 __ pop_ptr(r);
2400 __ null_check(r); // for field access must check obj.
2401 __ verify_oop(r);
2402 }
2403
2404 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2405 transition(vtos, vtos);
2406 Register Rcache = G3_scratch;
2407 Register index = G4_scratch;
2408 Register Rclass = Rcache;
2409 Register Roffset= G4_scratch;
2410 Register Rflags = G1_scratch;
2411 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2412
2413 resolve_cache_and_index(byte_no, Rcache, index);
2414 jvmti_post_field_mod(Rcache, index, is_static);
2415 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2416
2417 Assembler::Membar_mask_bits read_bits =
2418 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2419 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
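  // (read_bits forms the "release" barrier emitted before a volatile store;
  //  write_bits is the StoreLoad emitted after it, per the rules above
  //  volatile_barrier(). Under TSO only the StoreLoad has any effect.)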
2420
2421 Label notVolatile, checkVolatile, exit;
2422 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2423 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2424 __ and3(Rflags, Lscratch, Lscratch);
2425
2426 if (__ membar_has_effect(read_bits)) {
2427 __ tst(Lscratch);
2428 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2429 __ delayed()->nop();
2430 volatile_barrier(read_bits);
2431 __ bind(notVolatile);
2432 }
2433 }
2434
2435 __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2436 // Make sure we don't need to mask Rflags for tosBits after the above shift
2437 ConstantPoolCacheEntry::verify_tosBits();
2438
2439 // compute field type
2440 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2441
2442 if (is_static) {
2443 // putstatic with object type most likely, check that first
2444 __ cmp(Rflags, atos );
2445 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2446 __ delayed() ->cmp(Rflags, itos );
2447
2448 // atos
2449 __ pop_ptr();
2450 __ verify_oop(Otos_i);
2451 __ st_ptr(Otos_i, Rclass, Roffset);
2452 __ store_check(G1_scratch, Rclass, Roffset);
2453 __ ba(false, checkVolatile);
2454 __ delayed()->tst(Lscratch);
2455
2456 __ bind(notObj);
2457
2458 // cmp(Rflags, itos );
2459 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2460 __ delayed() ->cmp(Rflags, btos );
2461
2462 // itos
2463 __ pop_i();
2464 __ st(Otos_i, Rclass, Roffset);
2465 __ ba(false, checkVolatile);
2466 __ delayed()->tst(Lscratch);
2467
2468 __ bind(notInt);
2469
2470 } else {
2471 // putfield with int type most likely, check that first
2472 __ cmp(Rflags, itos );
2473 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2474 __ delayed() ->cmp(Rflags, atos );
2475
2476 // itos
2477 __ pop_i();
2478 pop_and_check_object(Rclass);
2479 __ st(Otos_i, Rclass, Roffset);
2480 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch);
2481 __ ba(false, checkVolatile);
2482 __ delayed()->tst(Lscratch);
2483
2484 __ bind(notInt);
2485 // cmp(Rflags, atos );
2486 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2487 __ delayed() ->cmp(Rflags, btos );
2488
2489 // atos
2490 __ pop_ptr();
2491 pop_and_check_object(Rclass);
2492 __ verify_oop(Otos_i);
2493 __ st_ptr(Otos_i, Rclass, Roffset);
2494 __ store_check(G1_scratch, Rclass, Roffset);
2495 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
2496 __ ba(false, checkVolatile);
2497 __ delayed()->tst(Lscratch);
2498
2499 __ bind(notObj);
2500 }
2501
2502 // cmp(Rflags, btos );
2503 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2504 __ delayed() ->cmp(Rflags, ltos );
2505
2506 // btos
2507 __ pop_i();
2508 if (!is_static) pop_and_check_object(Rclass);
2509 __ stb(Otos_i, Rclass, Roffset);
2510 if (!is_static) {
2511 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch);
2512 }
2513 __ ba(false, checkVolatile);
2514 __ delayed()->tst(Lscratch);
2515
2516 __ bind(notByte);
2517
2518 // cmp(Rflags, ltos );
2519 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2520 __ delayed() ->cmp(Rflags, ctos );
2521
2522 // ltos
2523 __ pop_l();
2524 if (!is_static) pop_and_check_object(Rclass);
2525 __ st_long(Otos_l, Rclass, Roffset);
2526 if (!is_static) {
2527 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch);
2528 }
2529 __ ba(false, checkVolatile);
2530 __ delayed()->tst(Lscratch);
2531
2532 __ bind(notLong);
2533
2534 // cmp(Rflags, ctos );
2535 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2536 __ delayed() ->cmp(Rflags, stos );
2537
2538 // ctos (char)
2539 __ pop_i();
2540 if (!is_static) pop_and_check_object(Rclass);
2541 __ sth(Otos_i, Rclass, Roffset);
2542 if (!is_static) {
2543 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch);
2544 }
2545 __ ba(false, checkVolatile);
2546 __ delayed()->tst(Lscratch);
2547
2548 __ bind(notChar);
2549 // cmp(Rflags, stos );
2550 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2551 __ delayed() ->cmp(Rflags, ftos );
2552
2553 // stos (short)
2554 __ pop_i();
2555 if (!is_static) pop_and_check_object(Rclass);
2556 __ sth(Otos_i, Rclass, Roffset);
2557 if (!is_static) {
2558 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch);
2559 }
2560 __ ba(false, checkVolatile);
2561 __ delayed()->tst(Lscratch);
2562
2563 __ bind(notShort);
2564 // cmp(Rflags, ftos );
2565 __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2566 __ delayed()->nop();
2567
2568 // ftos
2569 __ pop_f();
2570 if (!is_static) pop_and_check_object(Rclass);
2571 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2572 if (!is_static) {
2573 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch);
2574 }
2575 __ ba(false, checkVolatile);
2576 __ delayed()->tst(Lscratch);
2577
2578 __ bind(notFloat);
2579
2580 // dtos
2581 __ pop_d();
2582 if (!is_static) pop_and_check_object(Rclass);
2583 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2584 if (!is_static) {
2585 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch);
2586 }
2587
2588 __ bind(checkVolatile);
2589 __ tst(Lscratch);
2590
2591 if (__ membar_has_effect(write_bits)) {
2592 // condition codes were set by the tst(Lscratch) just above
2593 __ br(Assembler::zero, false, Assembler::pt, exit);
2594 __ delayed()->nop();
2595 volatile_barrier(Assembler::StoreLoad);
2596 __ bind(exit);
2597 }
2598 }
2599
2600 void TemplateTable::fast_storefield(TosState state) {
2601 transition(state, vtos);
2602 Register Rcache = G3_scratch;
2603 Register Rclass = Rcache;
2604 Register Roffset= G4_scratch;
2605 Register Rflags = G1_scratch;
2606 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2607
2608 jvmti_post_fast_field_mod();
2609
2610 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2611
2612 Assembler::Membar_mask_bits read_bits =
2613 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2614 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2615
2616 Label notVolatile, checkVolatile, exit;
2617 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2618 __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
2619 ConstantPoolCacheEntry::flags_offset())), Rflags);
2620 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2621 __ and3(Rflags, Lscratch, Lscratch);
2622 if (__ membar_has_effect(read_bits)) {
2623 __ tst(Lscratch);
2624 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2625 __ delayed()->nop();
2626 volatile_barrier(read_bits);
2627 __ bind(notVolatile);
2628 }
2629 }
2630
2631 __ ld_ptr(Address(Rcache, 0, in_bytes(cp_base_offset +
2632 ConstantPoolCacheEntry::f2_offset())), Roffset);
2633 pop_and_check_object(Rclass);
2634
2635 switch (bytecode()) {
2636 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2637 case Bytecodes::_fast_cputfield: /* fall through */
2638 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2639 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
2640 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2641 case Bytecodes::_fast_fputfield:
2642 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2643 break;
2644 case Bytecodes::_fast_dputfield:
2645 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2646 break;
2647 case Bytecodes::_fast_aputfield:
2648 __ st_ptr(Otos_i, Rclass, Roffset);
2649 __ store_check(G1_scratch, Rclass, Roffset);
2650 break;
2651 default:
2652 ShouldNotReachHere();
2653 }
2654
2655 if (__ membar_has_effect(write_bits)) {
2656 __ tst(Lscratch);
2657 __ br(Assembler::zero, false, Assembler::pt, exit);
2658 __ delayed()->nop();
2659 volatile_barrier(Assembler::StoreLoad);
2660 __ bind(exit);
2661 }
2662 }
2663
2664
2665 void TemplateTable::putfield(int byte_no) {
2666 putfield_or_static(byte_no, false);
2667 }
2668
2669 void TemplateTable::putstatic(int byte_no) {
2670 putfield_or_static(byte_no, true);
2671 }
2672
2673
2674 void TemplateTable::fast_xaccess(TosState state) {
2675 transition(vtos, state);
2676 Register Rcache = G3_scratch;
2677 Register Roffset = G4_scratch;
2678 Register Rflags = G4_scratch;
2679 Register Rreceiver = Lscratch;
2680
2681 __ ld_ptr(Llocals, Interpreter::value_offset_in_bytes(), Rreceiver);
2682
2683 // access constant pool cache (is resolved)
2684 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2685 __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())), Roffset);
2686 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
2687
2688 __ verify_oop(Rreceiver);
2689 __ null_check(Rreceiver);
2690 if (state == atos) {
2691 __ ld_ptr(Rreceiver, Roffset, Otos_i);
2692 } else if (state == itos) {
2693 __ ld (Rreceiver, Roffset, Otos_i) ;
2694 } else if (state == ftos) {
2695 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2696 } else {
2697 ShouldNotReachHere();
2698 }
2699
2700 Assembler::Membar_mask_bits membar_bits =
2701 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2702 if (__ membar_has_effect(membar_bits)) {
2703
2704 // Get is_volatile value in Rflags and check if membar is needed
2705 __ ld_ptr(Address(Rcache, 0, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset())), Rflags);
2706
2707 // Test volatile
2708 Label notVolatile;
2709 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2710 __ btst(Rflags, Lscratch);
2711 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2712 __ delayed()->nop();
2713 volatile_barrier(membar_bits);
2714 __ bind(notVolatile);
2715 }
2716
2717 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2718 __ sub(Lbcp, 1, Lbcp);
2719 }
2720
2721 //----------------------------------------------------------------------------------------------------
2722 // Calls
2723
2724 void TemplateTable::count_calls(Register method, Register temp) {
2725 // implemented elsewhere
2726 ShouldNotReachHere();
2727 }
2728
2729 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2730 Register Rtemp = G4_scratch;
2731 Register Rcall = Rindex;
2732 assert_different_registers(Rcall, G5_method, Gargs, Rret);
2733
2734 // get target methodOop & entry point
2735 const int base = instanceKlass::vtable_start_offset() * wordSize;
2736 if (vtableEntry::size() % 3 == 0) {
2737 // scale the vtable index by 12:
2738 int one_third = vtableEntry::size() / 3;
2739 __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp);
2740 __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex);
2741 __ add(Rindex, Rtemp, Rindex);
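    // (Rtemp + Rindex == Rindex * (3 * one_third * wordSize)
    //                 == Rindex * vtableEntry::size() * wordSize:
    //  two shifts and an add in place of a multiply by a non-power-of-two)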
2742 } else {
2743 // scale the vtable index by 8:
2744 __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex);
2745 }
2746
2747 __ add(Rrecv, Rindex, Rrecv);
2748 __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method);
2749
2750 __ call_from_interpreter(Rcall, Gargs, Rret);
2751 }
2752
2753 void TemplateTable::invokevirtual(int byte_no) {
2754 transition(vtos, vtos);
2755
2756 Register Rscratch = G3_scratch;
2757 Register Rtemp = G4_scratch;
2758 Register Rret = Lscratch;
2759 Register Rrecv = G5_method;
2760 Label notFinal;
2761
2762 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true);
2763 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2764
2765 // Check for vfinal
2766 __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch);
2767 __ btst(Rret, G4_scratch);
2768 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2769 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
2770
2771 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
2772
2773 invokevfinal_helper(Rscratch, Rret);
2774
2775 __ bind(notFinal);
2776
2777 __ mov(G5_method, Rscratch); // better scratch register
2778 __ load_receiver(G4_scratch, O0); // gets receiverOop
2779 // receiver is in O0
2780 __ verify_oop(O0);
2781
2782 // get return address
2783 Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
2784 __ load_address(table);
2785 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
2786 // Make sure we don't need to mask Rret for tosBits after the above shift
2787 ConstantPoolCacheEntry::verify_tosBits();
2788 __ sll(Rret, LogBytesPerWord, Rret);
2789 __ ld_ptr(Rtemp, Rret, Rret); // get return address
2790
2791 // get receiver klass
2792 __ null_check(O0, oopDesc::klass_offset_in_bytes());
2793 __ ld_ptr(Address(O0, 0, oopDesc::klass_offset_in_bytes()), Rrecv);
2794 __ verify_oop(Rrecv);
2795
2796 __ profile_virtual_call(Rrecv, O4);
2797
2798 generate_vtable_call(Rrecv, Rscratch, Rret);
2799 }
2800
2801 void TemplateTable::fast_invokevfinal(int byte_no) {
2802 transition(vtos, vtos);
2803
2804 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
2805 /*is_invokevfinal*/true);
2806 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2807 invokevfinal_helper(G3_scratch, Lscratch);
2808 }
2809
2810 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
2811 Register Rtemp = G4_scratch;
2812
2813 __ verify_oop(G5_method);
2814
2815 // Load receiver from stack slot
2816 __ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch);
2817 __ load_receiver(G4_scratch, O0);
2818
2819 // receiver NULL check
2820 __ null_check(O0);
2821
2822 __ profile_final_call(O4);
2823
2824 // get return address
2825 Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
2826 __ load_address(table);
2827 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
2828 // Make sure we don't need to mask Rret for tosBits after the above shift
2829 ConstantPoolCacheEntry::verify_tosBits();
2830 __ sll(Rret, LogBytesPerWord, Rret);
2831 __ ld_ptr(Rtemp, Rret, Rret); // get return address
2832
2833
2834 // do the call
2835 __ call_from_interpreter(Rscratch, Gargs, Rret);
2836 }
2837
2838 void TemplateTable::invokespecial(int byte_no) {
2839 transition(vtos, vtos);
2840
2841 Register Rscratch = G3_scratch;
2842 Register Rtemp = G4_scratch;
2843 Register Rret = Lscratch;
2844
2845 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
2846 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2847
2848 __ verify_oop(G5_method);
2849
2850 __ lduh(Address(G5_method, 0, in_bytes(methodOopDesc::size_of_parameters_offset())), G4_scratch);
2851 __ load_receiver(G4_scratch, O0);
2852
2853 // receiver NULL check
2854 __ null_check(O0);
2855
2856 __ profile_call(O4);
2857
2858 // get return address
2859 Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
2860 __ load_address(table);
2861 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
2862 // Make sure we don't need to mask Rret for tosBits after the above shift
2863 ConstantPoolCacheEntry::verify_tosBits();
2864 __ sll(Rret, LogBytesPerWord, Rret);
2865 __ ld_ptr(Rtemp, Rret, Rret); // get return address
2866
2867 // do the call
2868 __ call_from_interpreter(Rscratch, Gargs, Rret);
2869 }
2870
2871 void TemplateTable::invokestatic(int byte_no) {
2872 transition(vtos, vtos);
2873
2874 Register Rscratch = G3_scratch;
2875 Register Rtemp = G4_scratch;
2876 Register Rret = Lscratch;
2877
2878 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, false);
2879 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2880
2881 __ verify_oop(G5_method);
2882
2883 __ profile_call(O4);
2884
2885 // get return address
2886 Address table(Rtemp, (address)Interpreter::return_3_addrs_by_index_table());
2887 __ load_address(table);
2888 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
2889 // Make sure we don't need to mask Rret for tosBits after the above shift
2890 ConstantPoolCacheEntry::verify_tosBits();
2891 __ sll(Rret, LogBytesPerWord, Rret);
2892 __ ld_ptr(Rtemp, Rret, Rret); // get return address
2893
2894 // do the call
2895 __ call_from_interpreter(Rscratch, Gargs, Rret);
2896 }
2897
2898
2899 void TemplateTable::invokeinterface_object_method(Register RklassOop,
2900 Register Rcall,
2901 Register Rret,
2902 Register Rflags) {
2903 Register Rscratch = G4_scratch;
2904 Register Rindex = Lscratch;
2905
2906 assert_different_registers(Rscratch, Rindex, Rret);
2907
2908 Label notFinal;
2909
2910 // Check for vfinal
2911 __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch);
2912 __ btst(Rflags, Rscratch);
2913 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2914 __ delayed()->nop();
2915
2916 __ profile_final_call(O4);
2917
2918 // do the call - the index (f2) contains the methodOop
2919 assert_different_registers(G5_method, Gargs, Rcall);
2920 __ mov(Rindex, G5_method);
2921 __ call_from_interpreter(Rcall, Gargs, Rret);
2922 __ bind(notFinal);
2923
2924 __ profile_virtual_call(RklassOop, O4);
2925 generate_vtable_call(RklassOop, Rindex, Rret);
2926 }
2927
2928
2929 void TemplateTable::invokeinterface(int byte_no) {
2930 transition(vtos, vtos);
2931
2932 Register Rscratch = G4_scratch;
2933 Register Rret = G3_scratch;
2934 Register Rindex = Lscratch;
2935 Register Rinterface = G1_scratch;
2936 Register RklassOop = G5_method;
2937 Register Rflags = O1;
2938 assert_different_registers(Rscratch, G5_method);
2939
2940 load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, false);
2941 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2942
2943 // get receiver
2944 __ and3(Rflags, 0xFF, Rscratch); // gets number of parameters
2945 __ load_receiver(Rscratch, O0);
2946 __ verify_oop(O0);
2947
2948 __ mov(Rflags, Rret);
2949
2950 // get return address
2951 Address table(Rscratch, (address)Interpreter::return_5_addrs_by_index_table());
2952 __ load_address(table);
2953 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
2954 // Make sure we don't need to mask Rret for tosBits after the above shift
2955 ConstantPoolCacheEntry::verify_tosBits();
2956 __ sll(Rret, LogBytesPerWord, Rret);
2957 __ ld_ptr(Rscratch, Rret, Rret); // get return address
2958
2959 // get receiver klass
2960 __ null_check(O0, oopDesc::klass_offset_in_bytes());
2961 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), RklassOop);
2962 __ verify_oop(RklassOop);
2963
2964 // Special case of invokeinterface called for virtual method of
2965 // java.lang.Object. See cpCacheOop.cpp for details.
2966 // This code isn't produced by javac, but could be produced by
2967 // another compliant java compiler.
2968 Label notMethod;
2969 __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch);
2970 __ btst(Rflags, Rscratch);
2971 __ br(Assembler::zero, false, Assembler::pt, notMethod);
2972 __ delayed()->nop();
2973
2974 invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags);
2975
2976 __ bind(notMethod);
2977
2978 __ profile_virtual_call(RklassOop, O4);
2979
2980 //
2981 // find entry point to call
2982 //
2983
2984 // compute start of first itableOffsetEntry (which is at end of vtable)
2985 const int base = instanceKlass::vtable_start_offset() * wordSize;
2986 Label search;
2987 Register Rtemp = Rflags;
2988
2989 __ ld(Address(RklassOop, 0, instanceKlass::vtable_length_offset() * wordSize), Rtemp);
2990 if (align_object_offset(1) > 1) {
2991 __ round_to(Rtemp, align_object_offset(1));
2992 }
2993 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rtemp *= wordSize;
2994 if (Assembler::is_simm13(base)) {
2995 __ add(Rtemp, base, Rtemp);
2996 } else {
2997 __ set(base, Rscratch);
2998 __ add(Rscratch, Rtemp, Rtemp);
2999 }
3000 __ add(RklassOop, Rtemp, Rscratch);
3001
3002 __ bind(search);
3003
3004 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3005 {
3006 Label ok;
3007
3008 // Check that entry is non-null. Null entries are probably a bytecode
3009 // problem. If the interface isn't implemented by the receiver class,
3010 // the VM should throw IncompatibleClassChangeError. linkResolver checks
3011 // this too but that's only if the entry isn't already resolved, so we
3012 // need to check again.
3013 __ br_notnull( Rtemp, false, Assembler::pt, ok);
3014 __ delayed()->nop();
3015 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3016 __ should_not_reach_here();
3017 __ bind(ok);
3018 __ verify_oop(Rtemp);
3019 }
3020
3021 __ verify_oop(Rinterface);
3022
3023 __ cmp(Rinterface, Rtemp);
3024 __ brx(Assembler::notEqual, true, Assembler::pn, search);
3025 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3026
3027 // entry found and Rscratch points to it
3028 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3029
3030 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3031 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= entry size in bytes;
3032 __ add(Rscratch, Rindex, Rscratch);
3033 __ ld_ptr(RklassOop, Rscratch, G5_method);
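
  // (Illustrative only -- the itable search above, roughly, in C; the names
  //  are descriptive stand-ins:
  //
  //    for (ioe = itable_start(klass); ; ioe++) {
  //      if (ioe->interface == NULL) throw IncompatibleClassChangeError;
  //      if (ioe->interface == Rinterface) break;
  //    }
  //    G5_method = *(methodOop*)((address)klass + ioe->offset
  //                              + itable_index * entry_size);
  //  )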
3034
3035 // Check for abstract method error.
3036 {
3037 Label ok;
3038 __ tst(G5_method);
3039 __ brx(Assembler::notZero, false, Assembler::pt, ok);
3040 __ delayed()->nop();
3041 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3042 __ should_not_reach_here();
3043 __ bind(ok);
3044 }
3045
3046 Register Rcall = Rinterface;
3047 assert_different_registers(Rcall, G5_method, Gargs, Rret);
3048
3049 __ verify_oop(G5_method);
3050 __ call_from_interpreter(Rcall, Gargs, Rret);
3051
3052 }
3053
3054
3055 //----------------------------------------------------------------------------------------------------
3056 // Allocation
3057
3058 void TemplateTable::_new() {
3059 transition(vtos, atos);
3060
3061 Label slow_case;
3062 Label done;
3063 Label initialize_header;
3064 Label initialize_object; // including clearing the fields
3065
3066 Register RallocatedObject = Otos_i;
3067 Register RinstanceKlass = O1;
3068 Register Roffset = O3;
3069 Register Rscratch = O4;
3070
3071 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3072 __ get_cpool_and_tags(Rscratch, G3_scratch);
3073 // make sure the class we're about to instantiate has been resolved
3074 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3075 __ ldub(G3_scratch, Roffset, G3_scratch);
3076 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3077 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3078 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3079
3080 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3081 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3082 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3083
3084 // make sure klass is fully initialized:
3085 __ ld(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch);
3086 __ cmp(G3_scratch, instanceKlass::fully_initialized);
3087 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3088 __ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
3089
3090 // get instance_size in instanceKlass (already aligned)
3091 //__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
3092
3093 // make sure klass has no finalizer, is not abstract, and is not an interface or java/lang/Class
3094 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3095 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3096 __ delayed()->nop();
3097
3098 // allocate the instance
3099 // 1) Try to allocate in the TLAB
3100 // 2) if that fails, and the TLAB is not full enough to discard, allocate in the shared Eden
3101 // 3) if the above fails (or is not applicable), go to a slow case
3102 // (creates a new TLAB, etc.)
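
// (Illustrative only -- the three steps above in C-like pseudocode; the
//  names are descriptive stand-ins, not actual fields:
//
//    if (tlab_top + size <= tlab_end) {
//      tlab_top += size;                          // 1) bump the TLAB
//    } else if (refill_waste_limit >= tlab_free) {
//      goto slow_case;                            // 3) refill TLAB in the VM
//    } else {
//      refill_waste_limit += increment;           // keep this TLAB for now
//      do { old_top = eden_top; }                 // 2) CAS on the shared eden
//      while (!cas(&eden_top, old_top, old_top + size));
//    }
//  )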
3103
3104 const bool allow_shared_alloc =
3105 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3106
3107 if(UseTLAB) {
3108 Register RoldTopValue = RallocatedObject;
3109 Register RtopAddr = G3_scratch, RtlabWasteLimitValue = G3_scratch;
3110 Register RnewTopValue = G1_scratch;
3111 Register RendValue = Rscratch;
3112 Register RfreeValue = RnewTopValue;
3113
3114 // check if we can allocate in the TLAB
3115 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3116 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3117 __ add(RoldTopValue, Roffset, RnewTopValue);
3118
3119 // if there is enough space, we do not CAS and do not clear
3120 __ cmp(RnewTopValue, RendValue);
3121 if(ZeroTLAB) {
3122 // the fields have already been cleared
3123 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3124 } else {
3125 // initialize both the header and fields
3126 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3127 }
3128 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3129
3130 if (allow_shared_alloc) {
3131 // Check if tlab should be discarded (refill_waste_limit >= free)
3132 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3133 __ sub(RendValue, RoldTopValue, RfreeValue);
3134 #ifdef _LP64
3135 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3136 #else
3137 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3138 #endif
3139 __ cmp(RtlabWasteLimitValue, RfreeValue);
3140 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, slow_case); // tlab waste is small
3141 __ delayed()->nop();
3142
3143 // increment waste limit to prevent getting stuck on this slow path
3144 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3145 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3146 } else {
3147 // No allocation in the shared eden.
3148 __ br(Assembler::always, false, Assembler::pt, slow_case);
3149 __ delayed()->nop();
3150 }
3151 }
3152
3153 // Allocation in the shared Eden
3154 if (allow_shared_alloc) {
3155 Register RoldTopValue = G1_scratch;
3156 Register RtopAddr = G3_scratch;
3157 Register RnewTopValue = RallocatedObject;
3158 Register RendValue = Rscratch;
3159
3160 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3161
3162 Label retry;
3163 __ bind(retry);
3164 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3165 __ ld_ptr(RendValue, 0, RendValue);
3166 __ ld_ptr(RtopAddr, 0, RoldTopValue);
3167 __ add(RoldTopValue, Roffset, RnewTopValue);
3168
3169 // RnewTopValue contains the top address after the new object
3170 // has been allocated.
3171 __ cmp(RnewTopValue, RendValue);
3172 __ brx(Assembler::greaterUnsigned, false, Assembler::pn, slow_case);
3173 __ delayed()->nop();
3174
3175 __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
3176 VM_Version::v9_instructions_work() ? NULL :
3177 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3178
3179 // if someone beat us on the allocation, try again, otherwise continue
3180 __ cmp(RoldTopValue, RnewTopValue);
3181 __ brx(Assembler::notEqual, false, Assembler::pn, retry);
3182 __ delayed()->nop();
3183 }
3184
3185 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3186 // clear object fields
3187 __ bind(initialize_object);
3188 __ deccc(Roffset, sizeof(oopDesc));
3189 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3190 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
3191
3192 // initialize remaining object fields
3193 { Label loop;
3194 __ subcc(Roffset, wordSize, Roffset);
3195 __ bind(loop);
3196 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
3197 __ st_ptr(G0, G3_scratch, Roffset);
3198 __ br(Assembler::notEqual, false, Assembler::pt, loop);
3199 __ delayed()->subcc(Roffset, wordSize, Roffset);
3200 }
3201 __ br(Assembler::always, false, Assembler::pt, initialize_header);
3202 __ delayed()->nop();
3203 }
3204
3205 // slow case
3206 __ bind(slow_case);
3207 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3208 __ get_constant_pool(O1);
3209
3210 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
3211
3212 __ ba(false, done);
3213 __ delayed()->nop();
3214
3215 // Initialize the header: mark, klass
3216 __ bind(initialize_header);
3217
3218 if (UseBiasedLocking) {
3219 __ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch);
3220 } else {
3221 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3222 }
3223 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
3224 __ st_ptr(RinstanceKlass, RallocatedObject, oopDesc::klass_offset_in_bytes()); // klass
3225
3226 {
3227 SkipIfEqual skip_if(
3228 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3229 // Trigger dtrace event
3230 __ push(atos);
3231 __ call_VM_leaf(noreg,
3232 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3233 __ pop(atos);
3234 }
3235
3236 // continue
3237 __ bind(done);
3238 }
3239
3240
3241
3242 void TemplateTable::newarray() {
3243 transition(itos, atos);
3244 __ ldub(Lbcp, 1, O1);
3245 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3246 }
3247
3248
3249 void TemplateTable::anewarray() {
3250 transition(itos, atos);
3251 __ get_constant_pool(O1);
3252 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3253 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3254 }
3255
3256
3257 void TemplateTable::arraylength() {
3258 transition(atos, itos);
3259 Label ok;
3260 __ verify_oop(Otos_i);
3261 __ tst(Otos_i);
3262 __ throw_if_not_1_x( Assembler::notZero, ok );
3263 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
3264 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3265 }
3266
3267
3268 void TemplateTable::checkcast() {
3269 transition(atos, atos);
3270 Label done, is_null, quicked, cast_ok, resolved;
3271 Register Roffset = G1_scratch;
3272 Register RobjKlass = O5;
3273 Register RspecifiedKlass = O4;
3274
3275 // Check for casting a NULL
3276 __ br_null(Otos_i, false, Assembler::pn, is_null);
3277 __ delayed()->nop();
3278
3279 // Get value klass in RobjKlass
3280 __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
3281
3282 // Get constant pool tag
3283 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3284
3285 // See if the checkcast has been quickened
3286 __ get_cpool_and_tags(Lscratch, G3_scratch);
3287 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3288 __ ldub(G3_scratch, Roffset, G3_scratch);
3289 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3290 __ br(Assembler::equal, true, Assembler::pt, quicked);
3291 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3292
3293 __ push_ptr(); // save receiver for result, and for GC
3294 call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3295 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3296
3297 __ br(Assembler::always, false, Assembler::pt, resolved);
3298 __ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
3299
3300 // Extract target class from constant pool
3301 __ bind(quicked);
3302 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3303 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3304 __ bind(resolved);
3305
3306 // Generate a fast subtype check. Branch to cast_ok if no
3307 // failure. Throw exception if failure.
3308 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
3309
3310 // Not a subtype; so must throw exception
3311 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3312
3313 __ bind(cast_ok);
3314
3315 if (ProfileInterpreter) {
3316 __ ba(false, done);
3317 __ delayed()->nop();
3318 }
3319 __ bind(is_null);
3320 __ profile_null_seen(G3_scratch);
3321 __ bind(done);
3322 }
3323
3324
3325 void TemplateTable::instanceof() {
3326 Label done, is_null, quicked, resolved;
3327 transition(atos, itos);
3328 Register Roffset = G1_scratch;
3329 Register RobjKlass = O5;
3330 Register RspecifiedKlass = O4;
3331
3332 // Check for casting a NULL
3333 __ br_null(Otos_i, false, Assembler::pt, is_null);
3334 __ delayed()->nop();
3335
3336 // Get value klass in RobjKlass
3337 __ ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
3338
3339 // Get constant pool tag
3340 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3341
3342 // See if the instanceof has been quickened
3343 __ get_cpool_and_tags(Lscratch, G3_scratch);
3344 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3345 __ ldub(G3_scratch, Roffset, G3_scratch);
3346 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3347 __ br(Assembler::equal, true, Assembler::pt, quicked);
3348 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3349
3350 __ push_ptr(); // save receiver for result, and for GC
3351 call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3352 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3353
3354 __ br(Assembler::always, false, Assembler::pt, resolved);
3355 __ delayed()->ld_ptr(Otos_i, oopDesc::klass_offset_in_bytes(), RobjKlass); // get value klass
3356
3357
3358 // Extract target class from constant pool
3359 __ bind(quicked);
3360 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3361 __ get_constant_pool(Lscratch);
3362 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3363 __ bind(resolved);
3364
3365 // Generate a fast subtype check. Branch to done if no
3366 // failure. Return 0 if failure.
3367 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
3368 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3369 // Not a subtype; return 0.
3370 __ clr( Otos_i );
3371
3372 if (ProfileInterpreter) {
3373 __ ba(false, done);
3374 __ delayed()->nop();
3375 }
3376 __ bind(is_null);
3377 __ profile_null_seen(G3_scratch);
3378 __ bind(done);
3379 }
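// Overall, instanceof computes (Java-level sketch, illustrative only):
//
//   Otos_i = (obj != null && is_subtype_of(obj->klass(), specified_klass))
//            ? 1 : 0;
//
// The 1 is written optimistically (the or3 above) before the subtype check,
// and cleared to 0 only on the failure path, so success branches straight
// to done.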
3380
3381 void TemplateTable::_breakpoint() {
3382
3383 // Note: We get here even if we are single stepping;
3384 // jbug insists on setting breakpoints at every bytecode
3385 // even if we are in single-step mode.
3386
3387 transition(vtos, vtos);
3388 // get the unpatched byte code
3389 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3390 __ mov(O0, Lbyte_code);
3391
3392 // post the breakpoint event
3393 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3394
3395 // complete the execution of original bytecode
3396 __ dispatch_normal(vtos);
3397 }
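// The breakpoint template thus makes two runtime calls and then resumes
// normal execution (sketch only, schematic names):
//
//   byte orig = get_original_bytecode_at(method, bcp); // kept in Lbyte_code
//   post_breakpoint_event(method, bcp);                // JVMTI notification
//   dispatch(orig);                                    // run the real bytecode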
3398
3399
3400 //----------------------------------------------------------------------------------------------------
3401 // Exceptions
3402
3403 void TemplateTable::athrow() {
3404 transition(atos, vtos);
3405
3406 // This works because the exception is cached in Otos_i, which is the same
3407 // register as O0, which is what throw_exception_entry expects.
3408 assert(Otos_i == Oexception, "see explanation above");
3409
3410 __ verify_oop(Otos_i);
3411 __ null_check(Otos_i);
3412 __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3413 }
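// In effect (sketch, illustrative only):
//
//   if (exception_oop == NULL) throw NullPointerException; // null_check above
//   goto Interpreter::throw_exception_entry();  // oop already in Oexception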
3414
3415
3416 //----------------------------------------------------------------------------------------------------
3417 // Synchronization
3418
3419
3420 // See frame_sparc.hpp for monitor block layout.
3421 // Monitor elements are dynamically allocated by growing stack as needed.
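// Schematically (see frame_sparc.hpp for the authoritative layout; this
// picture is illustrative only), newer monitors sit at lower addresses:
//
//   higher addresses:  [ oldest monitor ]  <- __ top_most_monitor()
//                      [ ...            ]
//   lower addresses:   [ newest monitor ]  <- Lmonitors
//
// Each element is a BasicObjectLock { BasicLock lock; oop obj; }; a NULL
// obj marks an unused slot.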
3422
3423 void TemplateTable::monitorenter() {
3424 transition(atos, vtos);
3425 __ verify_oop(Otos_i);
3426 // Try to acquire a lock on the object;
3427 // repeat until it succeeds (i.e., until
3428 // monitorenter returns true).
3429
3430 { Label ok;
3431 __ tst(Otos_i);
3432 __ throw_if_not_1_x( Assembler::notZero, ok);
3433 __ delayed()->mov(Otos_i, Lscratch); // save obj
3434 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3435 }
3436
3437 assert(O0 == Otos_i, "Be sure where the object to lock is");
3438
3439 // find a free slot in the monitor block
3440
3441
3442 // initialize entry pointer
3443 __ clr(O1); // points to free slot or NULL
3444
3445 {
3446 Label entry, loop, exit;
3447 __ add( __ top_most_monitor(), O2 ); // last one to check
3448 __ ba( false, entry );
3449 __ delayed()->mov( Lmonitors, O3 ); // first one to check
3450
3451
3452 __ bind( loop );
3453
3454 __ verify_oop(O4); // verify each monitor's oop
3455 __ tst(O4); // is this entry unused?
3456 if (VM_Version::v9_instructions_work())
3457 __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
3458 else {
3459 Label L;
3460 __ br( Assembler::zero, true, Assembler::pn, L );
3461 __ delayed()->mov(O3, O1); // remember this one if unused
3462 __ bind(L);
3463 }
3464
3465 __ cmp(O4, O0); // check if current entry is for same object
3466 __ brx( Assembler::equal, false, Assembler::pn, exit );
3467 __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3468
3469 __ bind( entry );
3470
3471 __ cmp( O3, O2 );
3472 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3473 __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3474
3475 __ bind( exit );
3476 }
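// The scan above is, in C-like pseudocode (illustrative only; the real loop
// steps by interpreter_frame_monitor_size() * wordSize):
//
//   BasicObjectLock* free = NULL;                       // O1
//   for (BasicObjectLock* m = Lmonitors; m <= last; m++) {
//     if (m->obj() == NULL)     free = m;               // remember unused slot
//     if (m->obj() == lock_obj) break;                  // same object: stop
//   }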
3477
3478 { Label allocated;
3479
3480 // found free slot?
3481 __ br_notnull(O1, false, Assembler::pn, allocated);
3482 __ delayed()->nop();
3483
3484 __ add_monitor_to_stack( false, O2, O3 );
3485 __ mov(Lmonitors, O1);
3486
3487 __ bind(allocated);
3488 }
3489
3490 // Increment bcp to point to the next bytecode, so exception handling for async exceptions works correctly.
3491 // The object has already been popped from the stack, so the expression stack looks correct.
3492 __ inc(Lbcp);
3493
3494 __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3495 __ lock_object(O1, O0);
3496
3497 // check if there's enough space on the stack for the monitors after locking
3498 __ generate_stack_overflow_check(0);
3499
3500 // The bcp has already been incremented. Just need to dispatch to next instruction.
3501 __ dispatch_next(vtos);
3502 }
3503
3504
3505 void TemplateTable::monitorexit() {
3506 transition(atos, vtos);
3507 __ verify_oop(Otos_i);
3508 __ tst(Otos_i);
3509 __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3510
3511 assert(O0 == Otos_i, "just checking");
3512
3513 { Label entry, loop, found;
3514 __ add( __ top_most_monitor(), O2 ); // last one to check
3515 __ ba(false, entry );
3516 // Use Lscratch to hold the monitor element to check; start with the most recent monitor.
3517 // By using a local register it survives the call to the C routine.
3518 __ delayed()->mov( Lmonitors, Lscratch );
3519
3520 __ bind( loop );
3521
3522 __ verify_oop(O4); // verify each monitor's oop
3523 __ cmp(O4, O0); // check if current entry is for desired object
3524 __ brx( Assembler::equal, true, Assembler::pt, found );
3525 __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3526
3527 __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3528
3529 __ bind( entry );
3530
3531 __ cmp( Lscratch, O2 );
3532 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3533 __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3534
3535 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3536 __ should_not_reach_here();
3537
3538 __ bind(found);
3539 }
3540 __ unlock_object(O1);
3541 }
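// The unlock scan mirrors the one in monitorenter (C-like pseudocode,
// illustrative only):
//
//   for (BasicObjectLock* m = Lmonitors; m <= last; m++)
//     if (m->obj() == unlock_obj) { unlock_object(m); return; }
//   throw IllegalMonitorStateException;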
3542
3543
3544 //----------------------------------------------------------------------------------------------------
3545 // Wide instructions
3546
3547 void TemplateTable::wide() {
3548 transition(vtos, vtos);
3549 __ ldub(Lbcp, 1, G3_scratch); // get next bc
3550 __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3551 Address ep(G4_scratch, (address)Interpreter::_wentry_point);
3552 __ load_address(ep);
3553 __ ld_ptr(ep.base(), G3_scratch, G3_scratch);
3554 __ jmp(G3_scratch, G0);
3555 __ delayed()->nop();
3556 // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3557 }
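// Dispatch through the wide-entry table is an indexed load of an
// address-sized entry (sketch, illustrative only):
//
//   next_bc = *(bcp + 1);                       // bytecode following `wide`
//   goto Interpreter::_wentry_point[next_bc];   // its wide-form handler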
3558
3559
3560 //----------------------------------------------------------------------------------------------------
3561 // Multi arrays
3562
3563 void TemplateTable::multianewarray() {
3564 transition(vtos, atos);
3565 // put ndims * stackElementSize into Lscratch
3566 __ ldub( Lbcp, 3, Lscratch);
3567 __ sll( Lscratch, Interpreter::logStackElementSize(), Lscratch);
3568 // Lesp points past last_dim, so set O1 to the first_dim address
3569 __ add( Lesp, Lscratch, O1);
3570 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3571 __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
3572 }
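// The dimension words live on the expression stack and ndims comes from the
// operand byte at bcp+3 (after the two-byte constant-pool index). Sketch
// (illustrative only):
//
//   ndims     = *(bcp + 3);
//   first_dim = Lesp + ndims * stack_element_size; // Lesp points past last_dim
//   result    = InterpreterRuntime::multianewarray(first_dim);
//   Lesp     += ndims * stack_element_size;        // pop the dimensions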
3573 #endif /* !CC_INTERP */