comparison src/cpu/sparc/vm/macroAssembler_sparc.hpp @ 7204:f0c2369fda5a

8003250: SPARC: move MacroAssembler into separate file Reviewed-by: jrose, kvn
author twisti
date Thu, 06 Dec 2012 09:57:41 -0800
parents
children 18d56ca3e901
comparing 7201:c5d414e98fd4 with 7204:f0c2369fda5a
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
26 #define CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
27
28 #include "asm/assembler.hpp"
29
30 // <sys/trap.h> promises that the system will not use traps 16-31
31 #define ST_RESERVED_FOR_USER_0 0x10
32
33 class BiasedLockingCounters;
34
35
36 // Register aliases for parts of the system:
37
38 // 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
39 // across context switches in V8+ ABI. Of course, there are no 64 bit regs
40 // in V8 ABI. All 64 bits are preserved in V9 ABI for all registers.
41
42 // g2-g4 are scratch registers called "application globals". Their
43 // meaning is reserved to the "compilation system"--which means us!
44 // They are not supposed to be touched by ordinary C code, although
45 // highly-optimized C code might steal them for temps. They are safe
46 // across thread switches, and the ABI requires that they be safe
47 // across function calls.
48 //
49 // g1 and g3 are touched by more modules. V8 allows g1 to be clobbered
50 // across func calls, and V8+ also allows g5 to be clobbered across
51 // func calls. Also, g1 and g5 can get touched while doing shared
52 // library loading.
53 //
54 // We must not touch g7 (it is the thread-self register) and g6 is
55 // reserved for certain tools. g0, of course, is always zero.
56 //
57 // (Sources: SunSoft Compilers Group, thread library engineers.)
58
59 // %%%% The interpreter should be revisited to reduce global scratch regs.
60
61 // This global always holds the current JavaThread pointer:
62
63 REGISTER_DECLARATION(Register, G2_thread , G2);
64 REGISTER_DECLARATION(Register, G6_heapbase , G6);
65
66 // The following globals are part of the Java calling convention:
67
68 REGISTER_DECLARATION(Register, G5_method , G5);
69 REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
70 REGISTER_DECLARATION(Register, G5_inline_cache_reg , G5_method);
71
72 // The following globals are used for the new C1 & interpreter calling convention:
73 REGISTER_DECLARATION(Register, Gargs , G4); // pointing to the last argument
74
75 // This local is used to preserve G2_thread in the interpreter and in stubs:
76 REGISTER_DECLARATION(Register, L7_thread_cache , L7);
77
78 // These globals are used as scratch registers in the interpreter:
79
80 REGISTER_DECLARATION(Register, Gframe_size , G1); // SAME REG as G1_scratch
81 REGISTER_DECLARATION(Register, G1_scratch , G1); // also SAME
82 REGISTER_DECLARATION(Register, G3_scratch , G3);
83 REGISTER_DECLARATION(Register, G4_scratch , G4);
84
85 // These globals are used as short-lived scratch registers in the compiler:
86
87 REGISTER_DECLARATION(Register, Gtemp , G5);
88
89 // JSR 292 fixed register usages:
90 REGISTER_DECLARATION(Register, G5_method_type , G5);
91 REGISTER_DECLARATION(Register, G3_method_handle , G3);
92 REGISTER_DECLARATION(Register, L7_mh_SP_save , L7);
93
94 // The compiler requires that G5_megamorphic_method is G5_inline_cache_klass,
95 // because a single patchable "set" instruction (NativeMovConstReg,
96 // or NativeMovConstPatching for compiler1)
97 // serves to set up either quantity, depending on whether the compiled
98 // call site is an inline cache or is megamorphic. See the function
99 // CompiledIC::set_to_megamorphic.
100 //
101 // If an inline cache targets an interpreted method, then the
102 // G5 register will be used twice during the call. First,
103 // the call site will be patched to load a compiledICHolder
104 // into G5. (This is an ordered pair of ic_klass, method.)
105 // The c2i adapter will first check the ic_klass, then load
106 // G5_method with the method part of the pair just before
107 // jumping into the interpreter.
108 //
109 // Note that G5_method is only the method-self for the interpreter,
110 // and is logically unrelated to G5_megamorphic_method.
111 //
112 // Invariants on G2_thread (the JavaThread pointer):
113 // - it should not be used for any other purpose anywhere
114 // - it must be re-initialized by StubRoutines::call_stub()
115 // - it must be preserved around every use of call_VM
116
117 // We can consider using g2/g3/g4 to cache more values than the
118 // JavaThread, such as the card-marking base or perhaps pointers into
119 // Eden. It's something of a waste to use them as scratch temporaries,
120 // since they are not supposed to be volatile. (Of course, if we find
121 // that Java doesn't benefit from application globals, then we can just
122 // use them as ordinary temporaries.)
123 //
124 // Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
125 // it makes sense to use them routinely for procedure linkage,
126 // whenever the On registers are not applicable. Examples: G5_method,
127 // G5_inline_cache_klass, and a double handful of miscellaneous compiler
128 // stubs. This means that compiler stubs, etc., should be kept to a
129 // maximum of two or three G-register arguments.
130
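// A rough sketch of the call-site patching described above (illustrative
// only, not the actual CompiledIC::set_to_megamorphic code; 'call_site_addr'
// and 'holder' are hypothetical):
//
//   NativeMovConstReg* set_insn = nativeMovConstReg_at(call_site_addr);
//   set_insn->set_data((intptr_t) holder);   // retarget the value loaded into G5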
131
132 // stub frames
133
134 REGISTER_DECLARATION(Register, Lentry_args , L0); // pointer to args passed to the callee (interpreter), not to the stub itself
135
136 // Interpreter frames
137
138 #ifdef CC_INTERP
139 REGISTER_DECLARATION(Register, Lstate , L0); // interpreter state object pointer
140 REGISTER_DECLARATION(Register, L1_scratch , L1); // scratch
141 REGISTER_DECLARATION(Register, Lmirror , L1); // mirror (for native methods only)
142 REGISTER_DECLARATION(Register, L2_scratch , L2);
143 REGISTER_DECLARATION(Register, L3_scratch , L3);
144 REGISTER_DECLARATION(Register, L4_scratch , L4);
145 REGISTER_DECLARATION(Register, Lscratch , L5); // C1 uses
146 REGISTER_DECLARATION(Register, Lscratch2 , L6); // C1 uses
147 REGISTER_DECLARATION(Register, L7_scratch , L7); // constant pool cache
148 REGISTER_DECLARATION(Register, O5_savedSP , O5);
149 REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
150 // a copy of SP, so in 64-bit it's a biased value. The bias
151 // is added and removed as needed in the frame code.
152 // Interface to signature handler
153 REGISTER_DECLARATION(Register, Llocals , L7); // pointer to locals for signature handler
154 REGISTER_DECLARATION(Register, Lmethod , L6); // Method* when calling signature handler
155
156 #else
157 REGISTER_DECLARATION(Register, Lesp , L0); // expression stack pointer
158 REGISTER_DECLARATION(Register, Lbcp , L1); // pointer to next bytecode
159 REGISTER_DECLARATION(Register, Lmethod , L2);
160 REGISTER_DECLARATION(Register, Llocals , L3);
161 REGISTER_DECLARATION(Register, Largs , L3); // pointer to locals for signature handler
162 // must match Llocals in asm interpreter
163 REGISTER_DECLARATION(Register, Lmonitors , L4);
164 REGISTER_DECLARATION(Register, Lbyte_code , L5);
165 // When calling out from the interpreter we record SP so that we can remove any extra stack
166 // space allocated during adapter transitions. This register is only live from the point
167 // of the call until we return.
168 REGISTER_DECLARATION(Register, Llast_SP , L5);
169 REGISTER_DECLARATION(Register, Lscratch , L5);
170 REGISTER_DECLARATION(Register, Lscratch2 , L6);
171 REGISTER_DECLARATION(Register, LcpoolCache , L6); // constant pool cache
172
173 REGISTER_DECLARATION(Register, O5_savedSP , O5);
174 REGISTER_DECLARATION(Register, I5_savedSP , I5); // Saved SP before bumping for locals. This is simply
175 // a copy of SP, so in 64-bit it's a biased value. The bias
176 // is added and removed as needed in the frame code.
177 REGISTER_DECLARATION(Register, IdispatchTables , I4); // Base address of the bytecode dispatch tables
178 REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
179 REGISTER_DECLARATION(Register, ImethodDataPtr , I2); // Pointer to the current method data
180 #endif /* CC_INTERP */
181
182 // NOTE: Lscratch2 and LcpoolCache point to the same register in
183 // the interpreter code. If Lscratch2 needs to be used for some
184 // purpose, then LcpoolCache must be restored afterwards for
185 // the interpreter to work correctly.
186 // (These assignments must be compatible with L7_thread_cache; see above.)
187
188 // Since Lbcp points into the middle of the method object,
189 // it is temporarily converted into a "bcx" during GC.
190
191 // Exception processing
192 // These registers are passed into exception handlers.
193 // All exception handlers require the exception object being thrown.
194 // In addition, an nmethod's exception handler must be passed
195 // the address of the call site within the nmethod, to allow
196 // proper selection of the applicable catch block.
197 // (Interpreter frames use their own bcp() for this purpose.)
198 //
199 // The Oissuing_pc value is not always needed. When jumping to a
200 // handler that is known to be interpreted, the Oissuing_pc value can be
201 // omitted. An actual catch block in compiled code receives (from its
202 // nmethod's exception handler) the thrown exception in the Oexception,
203 // but it doesn't need the Oissuing_pc.
204 //
205 // If an exception handler (either interpreted or compiled)
206 // discovers there is no applicable catch block, it updates
207 // the Oissuing_pc to the continuation PC of its own caller,
208 // pops back to that caller's stack frame, and executes that
209 // caller's exception handler. Obviously, this process will
210 // iterate until the control stack is popped back to a method
211 // containing an applicable catch block. A key invariant is
212 // that the Oissuing_pc value is always a value local to
213 // the method whose exception handler is currently executing.
214 //
215 // Note: The issuing PC value is __not__ a raw return address (I7 value).
216 // It is a "return pc", the address __following__ the call.
217 // Raw return addresses are converted to issuing PCs by frame::pc(),
218 // or by stubs. Issuing PCs can be used directly with PC range tables.
219 //
220 REGISTER_DECLARATION(Register, Oexception , O0); // exception being thrown
221 REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
222
223
224 // These must occur after the declarations above
225 #ifndef DONT_USE_REGISTER_DEFINES
226
227 #define Gthread AS_REGISTER(Register, Gthread)
228 #define Gmethod AS_REGISTER(Register, Gmethod)
229 #define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
230 #define Ginline_cache_reg AS_REGISTER(Register, Ginline_cache_reg)
231 #define Gargs AS_REGISTER(Register, Gargs)
232 #define Lthread_cache AS_REGISTER(Register, Lthread_cache)
233 #define Gframe_size AS_REGISTER(Register, Gframe_size)
234 #define Gtemp AS_REGISTER(Register, Gtemp)
235
236 #ifdef CC_INTERP
237 #define Lstate AS_REGISTER(Register, Lstate)
238 #define Lesp AS_REGISTER(Register, Lesp)
239 #define L1_scratch AS_REGISTER(Register, L1_scratch)
240 #define Lmirror AS_REGISTER(Register, Lmirror)
241 #define L2_scratch AS_REGISTER(Register, L2_scratch)
242 #define L3_scratch AS_REGISTER(Register, L3_scratch)
243 #define L4_scratch AS_REGISTER(Register, L4_scratch)
244 #define Lscratch AS_REGISTER(Register, Lscratch)
245 #define Lscratch2 AS_REGISTER(Register, Lscratch2)
246 #define L7_scratch AS_REGISTER(Register, L7_scratch)
247 #define Ostate AS_REGISTER(Register, Ostate)
248 #else
249 #define Lesp AS_REGISTER(Register, Lesp)
250 #define Lbcp AS_REGISTER(Register, Lbcp)
251 #define Lmethod AS_REGISTER(Register, Lmethod)
252 #define Llocals AS_REGISTER(Register, Llocals)
253 #define Lmonitors AS_REGISTER(Register, Lmonitors)
254 #define Lbyte_code AS_REGISTER(Register, Lbyte_code)
255 #define Lscratch AS_REGISTER(Register, Lscratch)
256 #define Lscratch2 AS_REGISTER(Register, Lscratch2)
257 #define LcpoolCache AS_REGISTER(Register, LcpoolCache)
258 #endif /* ! CC_INTERP */
259
260 #define Lentry_args AS_REGISTER(Register, Lentry_args)
261 #define I5_savedSP AS_REGISTER(Register, I5_savedSP)
262 #define O5_savedSP AS_REGISTER(Register, O5_savedSP)
263 #define IdispatchAddress AS_REGISTER(Register, IdispatchAddress)
264 #define ImethodDataPtr AS_REGISTER(Register, ImethodDataPtr)
265 #define IdispatchTables AS_REGISTER(Register, IdispatchTables)
266
267 #define Oexception AS_REGISTER(Register, Oexception)
268 #define Oissuing_pc AS_REGISTER(Register, Oissuing_pc)
269
270 #endif
271
272
273 // Address is an abstraction used to represent a memory location.
274 //
275 // Note: A register location is represented via a Register, not
276 // via an address for efficiency & simplicity reasons.
277
278 class Address VALUE_OBJ_CLASS_SPEC {
279 private:
280 Register _base; // Base register.
281 RegisterOrConstant _index_or_disp; // Index register or constant displacement.
282 RelocationHolder _rspec;
283
284 public:
285 Address() : _base(noreg), _index_or_disp(noreg) {}
286
287 Address(Register base, RegisterOrConstant index_or_disp)
288 : _base(base),
289 _index_or_disp(index_or_disp) {
290 }
291
292 Address(Register base, Register index)
293 : _base(base),
294 _index_or_disp(index) {
295 }
296
297 Address(Register base, int disp)
298 : _base(base),
299 _index_or_disp(disp) {
300 }
301
302 #ifdef ASSERT
303 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
304 Address(Register base, ByteSize disp)
305 : _base(base),
306 _index_or_disp(in_bytes(disp)) {
307 }
308 #endif
309
310 // accessors
311 Register base() const { return _base; }
312 Register index() const { return _index_or_disp.as_register(); }
313 int disp() const { return _index_or_disp.as_constant(); }
314
315 bool has_index() const { return _index_or_disp.is_register(); }
316 bool has_disp() const { return _index_or_disp.is_constant(); }
317
318 bool uses(Register reg) const { return base() == reg || (has_index() && index() == reg); }
319
320 const relocInfo::relocType rtype() { return _rspec.type(); }
321 const RelocationHolder& rspec() { return _rspec; }
322
323 RelocationHolder rspec(int offset) const {
324 return offset == 0 ? _rspec : _rspec.plus(offset);
325 }
326
327 inline bool is_simm13(int offset = 0); // check disp+offset for overflow
328
329 Address plus_disp(int plusdisp) const { // bump disp by a small amount
330 assert(_index_or_disp.is_constant(), "must have a displacement");
331 Address a(base(), disp() + plusdisp);
332 return a;
333 }
334 bool is_same_address(Address a) const {
335 // disregard _rspec
336 return base() == a.base() && (has_index() ? index() == a.index() : disp() == a.disp());
337 }
338
339 Address after_save() const {
340 Address a = (*this);
341 a._base = a._base->after_save();
342 return a;
343 }
344
345 Address after_restore() const {
346 Address a = (*this);
347 a._base = a._base->after_restore();
348 return a;
349 }
350
351 // Convert the raw encoding form into the form expected by the
352 // constructor for Address.
353 static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
354
355 friend class Assembler;
356 };
357
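// Illustrative usage (not part of the original header; the displacement and
// register roles are arbitrary, and the usual '#define __ masm->' shorthand
// is assumed). An Address pairs a base register with either an index register
// or a simm13 displacement:
//
//   Address field_addr(O0, 8);                          // M[O0 + 8]
//   __ ld_ptr(field_addr, G3_scratch);                  // load a pointer-sized value
//   __ st_ptr(G3_scratch, field_addr.plus_disp(wordSize));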
358
359 class AddressLiteral VALUE_OBJ_CLASS_SPEC {
360 private:
361 address _address;
362 RelocationHolder _rspec;
363
364 RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
365 switch (rtype) {
366 case relocInfo::external_word_type:
367 return external_word_Relocation::spec(addr);
368 case relocInfo::internal_word_type:
369 return internal_word_Relocation::spec(addr);
370 #ifdef _LP64
371 case relocInfo::opt_virtual_call_type:
372 return opt_virtual_call_Relocation::spec();
373 case relocInfo::static_call_type:
374 return static_call_Relocation::spec();
375 case relocInfo::runtime_call_type:
376 return runtime_call_Relocation::spec();
377 #endif
378 case relocInfo::none:
379 return RelocationHolder();
380 default:
381 ShouldNotReachHere();
382 return RelocationHolder();
383 }
384 }
385
386 protected:
387 // creation
388 AddressLiteral() : _address(NULL), _rspec(NULL) {}
389
390 public:
391 AddressLiteral(address addr, RelocationHolder const& rspec)
392 : _address(addr),
393 _rspec(rspec) {}
394
395 // Some constructors to avoid casting at the call site.
396 AddressLiteral(jobject obj, RelocationHolder const& rspec)
397 : _address((address) obj),
398 _rspec(rspec) {}
399
400 AddressLiteral(intptr_t value, RelocationHolder const& rspec)
401 : _address((address) value),
402 _rspec(rspec) {}
403
404 AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
405 : _address((address) addr),
406 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
407
408 // Some constructors to avoid casting at the call site.
409 AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
410 : _address((address) addr),
411 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
412
413 AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
414 : _address((address) addr),
415 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
416
417 AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
418 : _address((address) addr),
419 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
420
421 AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
422 : _address((address) addr),
423 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
424
425 AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
426 : _address((address) addr),
427 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
428
429 AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
430 : _address((address) addr),
431 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
432
433 #ifdef _LP64
434 // 32-bit complains about a multiple declaration for int*.
435 AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
436 : _address((address) addr),
437 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
438 #endif
439
440 AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
441 : _address((address) addr),
442 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
443
444 AddressLiteral(Metadata** addr, relocInfo::relocType rtype = relocInfo::none)
445 : _address((address) addr),
446 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
447
448 AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
449 : _address((address) addr),
450 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
451
452 AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
453 : _address((address) addr),
454 _rspec(rspec_from_rtype(rtype, (address) addr)) {}
455
456 intptr_t value() const { return (intptr_t) _address; }
457 int low10() const;
458
459 const relocInfo::relocType rtype() const { return _rspec.type(); }
460 const RelocationHolder& rspec() const { return _rspec; }
461
462 RelocationHolder rspec(int offset) const {
463 return offset == 0 ? _rspec : _rspec.plus(offset);
464 }
465 };
466
467 // Convenience classes
468 class ExternalAddress: public AddressLiteral {
469 private:
470 static relocInfo::relocType reloc_for_target(address target) {
471 // Sometimes ExternalAddress is used for values which aren't
472 // exactly addresses, like the card table base.
473 // external_word_type can't be used for values in the first page
474 // so just skip the reloc in that case.
475 return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
476 }
477
478 public:
479 ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {}
480 ExternalAddress(Metadata** target) : AddressLiteral(target, reloc_for_target((address) target)) {}
481 };
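// Illustrative usage (not part of the original header; 'flag_addr' is a
// hypothetical C pointer and the usual '__' shorthand is assumed). An
// AddressLiteral carries a 32/64-bit constant plus its relocation info;
// ExternalAddress is the common case of a C/runtime address:
//
//   ExternalAddress flag(flag_addr);
//   __ set(flag, G1_scratch);                // materialize the address itself
//   __ load_contents(flag, G3_scratch);      // or load the word it points to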
482
483 inline Address RegisterImpl::address_in_saved_window() const {
484 return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
485 }
486
487
488
489 // Argument is an abstraction used to represent an outgoing
490 // actual argument or an incoming formal parameter, whether
491 // it resides in memory or in a register, in a manner consistent
492 // with the SPARC Application Binary Interface, or ABI. This is
493 // often referred to as the native or C calling convention.
494
495 class Argument VALUE_OBJ_CLASS_SPEC {
496 private:
497 int _number;
498 bool _is_in;
499
500 public:
501 #ifdef _LP64
502 enum {
503 n_register_parameters = 6, // only 6 registers may contain integer parameters
504 n_float_register_parameters = 16 // Can have up to 16 floating registers
505 };
506 #else
507 enum {
508 n_register_parameters = 6 // only 6 registers may contain integer parameters
509 };
510 #endif
511
512 // creation
513 Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
514
515 int number() const { return _number; }
516 bool is_in() const { return _is_in; }
517 bool is_out() const { return !is_in(); }
518
519 Argument successor() const { return Argument(number() + 1, is_in()); }
520 Argument as_in() const { return Argument(number(), true ); }
521 Argument as_out() const { return Argument(number(), false); }
522
523 // locating register-based arguments:
524 bool is_register() const { return _number < n_register_parameters; }
525
526 #ifdef _LP64
527 // locating Floating Point register-based arguments:
528 bool is_float_register() const { return _number < n_float_register_parameters; }
529
530 FloatRegister as_float_register() const {
531 assert(is_float_register(), "must be a register argument");
532 return as_FloatRegister(( number() *2 ) + 1);
533 }
534 FloatRegister as_double_register() const {
535 assert(is_float_register(), "must be a register argument");
536 return as_FloatRegister(( number() *2 ));
537 }
538 #endif
539
540 Register as_register() const {
541 assert(is_register(), "must be a register argument");
542 return is_in() ? as_iRegister(number()) : as_oRegister(number());
543 }
544
545 // locating memory-based arguments
546 Address as_address() const {
547 assert(!is_register(), "must be a memory argument");
548 return address_in_frame();
549 }
550
551 // When applied to a register-based argument, give the corresponding address
552 // into the 6-word area "into which callee may store register arguments"
553 // (This is a different place than the corresponding register-save area location.)
554 Address address_in_frame() const;
555
556 // debugging
557 const char* name() const;
558
559 friend class Assembler;
560 };
561
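// Illustrative usage (not part of the original header; 'total_args' is a
// hypothetical count). Walking the outgoing C argument list, the first
// n_register_parameters words travel in registers and the rest in the frame:
//
//   Argument arg(0, false);                        // first outgoing argument
//   for (int i = 0; i < total_args; i++, arg = arg.successor()) {
//     if (arg.is_register()) {
//       Register r = arg.as_register();            // O0..O5
//     } else {
//       Address  a = arg.as_address();             // memory-based argument
//     }
//   }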
562
563 class RegistersForDebugging : public StackObj {
564 public:
565 intptr_t i[8], l[8], o[8], g[8];
566 float f[32];
567 double d[32];
568
569 void print(outputStream* s);
570
571 static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
572 static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
573 static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
574 static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
575 static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
576 static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }
577
578 // gen asm code to save regs
579 static void save_registers(MacroAssembler* a);
580
581 // restore global registers in case C code disturbed them
582 static void restore_registers(MacroAssembler* a, Register r);
583 };
584
585
586 // MacroAssembler extends Assembler by a few frequently used macros.
587 //
588 // Most of the standard SPARC synthetic ops are defined here.
589 // Instructions for which a 'better' code sequence exists depending
590 // on arguments should also go in here.
591
592 #define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
593 #define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
594 #define JUMP(a, temp, off) jump(a, temp, off, __FILE__, __LINE__)
595 #define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)
596
597
598 class MacroAssembler : public Assembler {
599 // code patchers need various routines like inv_wdisp()
600 friend class NativeInstruction;
601 friend class NativeGeneralJump;
602 friend class Relocation;
603 friend class Label;
604
605 protected:
606 static void print_instruction(int inst);
607 static int patched_branch(int dest_pos, int inst, int inst_pos);
608 static int branch_destination(int inst, int pos);
609
610 // Support for VM calls
611 // This is the base routine called by the different versions of call_VM_leaf. The interpreter
612 // may customize this version by overriding it for its purposes (e.g., to save/restore
613 // additional registers when doing a VM call).
614 #ifdef CC_INTERP
615 #define VIRTUAL
616 #else
617 #define VIRTUAL virtual
618 #endif
619
620 VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
621
622 //
623 // It is imperative that all calls into the VM are handled via the call_VM macros.
624 // They make sure that the stack linkage is setup correctly. call_VM's correspond
625 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
626 //
627 // This is the base routine called by the different versions of call_VM. The interpreter
628 // may customize this version by overriding it for its purposes (e.g., to save/restore
629 // additional registers when doing a VM call).
630 //
631 // A non-volatile java_thread_cache register should be specified so
632 // that the G2_thread value can be preserved across the call.
633 // (If java_thread_cache is noreg, then a slow get_thread call
634 // will re-initialize the G2_thread.) call_VM_base returns the register that contains the
635 // thread.
636 //
637 // If no last_java_sp is specified (noreg) then SP will be used instead.
638
639 virtual void call_VM_base(
640 Register oop_result, // where an oop-result ends up if any; use noreg otherwise
641 Register java_thread_cache, // the thread if computed before ; use noreg otherwise
642 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise
643 address entry_point, // the entry point
644 int number_of_arguments, // the number of arguments (w/o thread) to pop after call
645 bool check_exception=true // flag which indicates if exception should be checked
646 );
647
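// Illustrative call_VM use (not part of the original header;
// 'some_runtime_entry' is a placeholder for a real VM entry point). The
// thread argument is prepended automatically and, when an oop result is
// expected, it is returned in the first register argument:
//
//   __ call_VM(noreg, CAST_FROM_FN_PTR(address, some_runtime_entry), G3_scratch);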
648 // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
649 // The implementation is only non-empty for the InterpreterMacroAssembler,
650 // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
651 virtual void check_and_handle_popframe(Register scratch_reg);
652 virtual void check_and_handle_earlyret(Register scratch_reg);
653
654 public:
655 MacroAssembler(CodeBuffer* code) : Assembler(code) {}
656
657 // Support for NULL-checks
658 //
659 // Generates code that causes a NULL OS exception if the content of reg is NULL.
660 // If the accessed location is M[reg + offset] and the offset is known, provide the
661 // offset. No explicit code generation is needed if the offset is within a certain
662 // range (0 <= offset <= page_size).
663 //
664 // %%%%%% Currently not done for SPARC
665
666 void null_check(Register reg, int offset = -1);
667 static bool needs_explicit_null_check(intptr_t offset);
668
669 // support for delayed instructions
670 MacroAssembler* delayed() { Assembler::delayed(); return this; }
671
672 // branches that use the right instruction for v8 vs. v9
673 inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
674 inline void br( Condition c, bool a, Predict p, Label& L );
675
676 inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
677 inline void fb( Condition c, bool a, Predict p, Label& L );
678
679 // compares register with zero (32 bit) and branches (V9 and V8 instructions)
680 void cmp_zero_and_br( Condition c, Register s1, Label& L, bool a = false, Predict p = pn );
681 // Compares a pointer register with zero and branches on (not)null.
682 // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
683 void br_null ( Register s1, bool a, Predict p, Label& L );
684 void br_notnull( Register s1, bool a, Predict p, Label& L );
685
686 //
687 // Compare registers and branch with nop in delay slot or cbcond without delay slot.
688 //
689 // ATTENTION: use these instructions with caution because the cbcond instruction
690 // has a very short branch distance: 512 instructions (2 Kbytes).
691
692 // Compare integer (32 bit) values (icc only).
693 void cmp_and_br_short(Register s1, Register s2, Condition c, Predict p, Label& L);
694 void cmp_and_br_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
695 // Platform-dependent version for pointer compares (icc on !LP64 and xcc on LP64).
696 void cmp_and_brx_short(Register s1, Register s2, Condition c, Predict p, Label& L);
697 void cmp_and_brx_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
698
699 // Short branch versions that compare a pointer with zero.
700 void br_null_short ( Register s1, Predict p, Label& L );
701 void br_notnull_short( Register s1, Predict p, Label& L );
702
703 // unconditional short branch
704 void ba_short(Label& L);
705
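// Illustrative short-branch use (not part of the original header; the
// register and label names are hypothetical). Because cbcond only reaches
// about 512 instructions, keep the target close, e.g. a small guard at the
// head of a stub:
//
//   Label L_ok;
//   __ cmp_and_br_short(Rcount, 0, Assembler::notEqual, Assembler::pt, L_ok);
//   __ should_not_reach_here();
//   __ bind(L_ok);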
706 inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
707 inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
708
709 // Branch that tests xcc in LP64 and icc in !LP64
710 inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
711 inline void brx( Condition c, bool a, Predict p, Label& L );
712
713 // unconditional branch
714 inline void ba( Label& L );
715
716 // Branch that tests fp condition codes
717 inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
718 inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
719
720 // get PC the best way
721 inline int get_pc( Register d );
722
723 // Sparc shorthands (p. 85, V8 manual; p. 289, V9 manual)
724 inline void cmp( Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
725 inline void cmp( Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }
726
727 inline void jmp( Register s1, Register s2 );
728 inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
729
730 // Check if the call target is out of wdisp30 range (relative to the code cache)
731 static inline bool is_far_target(address d);
732 inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type );
733 inline void call( Label& L, relocInfo::relocType rt = relocInfo::runtime_call_type );
734 inline void callr( Register s1, Register s2 );
735 inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
736
737 // Emits nothing on V8
738 inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
739 inline void iprefetch( Label& L);
740
741 inline void tst( Register s ) { orcc( G0, s, G0 ); }
742
743 #ifdef PRODUCT
744 inline void ret( bool trace = TraceJumps ) { if (trace) {
745 mov(I7, O7); // traceable register
746 JMP(O7, 2 * BytesPerInstWord);
747 } else {
748 jmpl( I7, 2 * BytesPerInstWord, G0 );
749 }
750 }
751
752 inline void retl( bool trace = TraceJumps ) { if (trace) JMP(O7, 2 * BytesPerInstWord);
753 else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
754 #else
755 void ret( bool trace = TraceJumps );
756 void retl( bool trace = TraceJumps );
757 #endif /* PRODUCT */
758
759 // Required platform-specific helpers for Label::patch_instructions.
760 // They _shadow_ the declarations in AbstractAssembler, which are undefined.
761 void pd_patch_instruction(address branch, address target);
762 #ifndef PRODUCT
763 static void pd_print_patched_instruction(address branch);
764 #endif
765
766 // sethi Macro handles optimizations and relocations
767 private:
768 void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
769 public:
770 void sethi(const AddressLiteral& addrlit, Register d);
771 void patchable_sethi(const AddressLiteral& addrlit, Register d);
772
773 // compute the number of instructions for a sethi/set
774 static int insts_for_sethi( address a, bool worst_case = false );
775 static int worst_case_insts_for_set();
776
777 // set may be either setsw or setuw (high 32 bits may be zero or sign)
778 private:
779 void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
780 static int insts_for_internal_set(intptr_t value);
781 public:
782 void set(const AddressLiteral& addrlit, Register d);
783 void set(intptr_t value, Register d);
784 void set(address addr, Register d, RelocationHolder const& rspec);
785 static int insts_for_set(intptr_t value) { return insts_for_internal_set(value); }
786
787 void patchable_set(const AddressLiteral& addrlit, Register d);
788 void patchable_set(intptr_t value, Register d);
789 void set64(jlong value, Register d, Register tmp);
790 static int insts_for_set64(jlong value);
791
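// Illustrative use of the set/sethi pseudos (not part of the original
// header; the constant and the pointer 'base' are arbitrary examples):
//
//   __ set(0x3ff, G1_scratch);               // small constant, typically a single instruction
//   AddressLiteral table((address) base);
//   __ set(table, G3_scratch);               // materialize a (relocatable) address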
792 // sign-extend 32 to 64
793 inline void signx( Register s, Register d ) { sra( s, G0, d); }
794 inline void signx( Register d ) { sra( d, G0, d); }
795
796 inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
797 inline void not1( Register d ) { xnor( d, G0, d ); }
798
799 inline void neg( Register s, Register d ) { sub( G0, s, d ); }
800 inline void neg( Register d ) { sub( G0, d, d ); }
801
802 inline void cas( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
803 inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
804 // Functions for isolating 64 bit atomic swaps for LP64
805 // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
806 inline void cas_ptr( Register s1, Register s2, Register d) {
807 #ifdef _LP64
808 casx( s1, s2, d );
809 #else
810 cas( s1, s2, d );
811 #endif
812 }
813
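// Illustrative compare-and-swap loop (not part of the original header;
// Raddr/Rcmp/Rnew are hypothetical register roles). cas_ptr compares M[Raddr]
// with Rcmp and, on a match, stores Rnew; the old memory value always comes
// back in Rnew's register, so success means Rcmp == Rnew afterwards:
//
//   Label L_retry;
//   __ bind(L_retry);
//   __ ld_ptr(Raddr, 0, Rcmp);              // current value
//   __ add(Rcmp, 1, Rnew);                  // desired value
//   __ cas_ptr(Raddr, Rcmp, Rnew);          // Rnew <- old M[Raddr]
//   __ cmp_and_brx_short(Rcmp, Rnew, Assembler::notEqual, Assembler::pn, L_retry);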
814 // Functions for isolating 64 bit shifts for LP64
815 inline void sll_ptr( Register s1, Register s2, Register d );
816 inline void sll_ptr( Register s1, int imm6a, Register d );
817 inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
818 inline void srl_ptr( Register s1, Register s2, Register d );
819 inline void srl_ptr( Register s1, int imm6a, Register d );
820
821 // little-endian
822 inline void casl( Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
823 inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }
824
825 inline void inc( Register d, int const13 = 1 ) { add( d, const13, d); }
826 inline void inccc( Register d, int const13 = 1 ) { addcc( d, const13, d); }
827
828 inline void dec( Register d, int const13 = 1 ) { sub( d, const13, d); }
829 inline void deccc( Register d, int const13 = 1 ) { subcc( d, const13, d); }
830
831 using Assembler::add;
832 inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype);
833 inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
834 inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
835 inline void add(const Address& a, Register d, int offset = 0);
836
837 using Assembler::andn;
838 inline void andn( Register s1, RegisterOrConstant s2, Register d);
839
840 inline void btst( Register s1, Register s2 ) { andcc( s1, s2, G0 ); }
841 inline void btst( int simm13a, Register s ) { andcc( s, simm13a, G0 ); }
842
843 inline void bset( Register s1, Register s2 ) { or3( s1, s2, s2 ); }
844 inline void bset( int simm13a, Register s ) { or3( s, simm13a, s ); }
845
846 inline void bclr( Register s1, Register s2 ) { andn( s1, s2, s2 ); }
847 inline void bclr( int simm13a, Register s ) { andn( s, simm13a, s ); }
848
849 inline void btog( Register s1, Register s2 ) { xor3( s1, s2, s2 ); }
850 inline void btog( int simm13a, Register s ) { xor3( s, simm13a, s ); }
851
852 inline void clr( Register d ) { or3( G0, G0, d ); }
853
854 inline void clrb( Register s1, Register s2);
855 inline void clrh( Register s1, Register s2);
856 inline void clr( Register s1, Register s2);
857 inline void clrx( Register s1, Register s2);
858
859 inline void clrb( Register s1, int simm13a);
860 inline void clrh( Register s1, int simm13a);
861 inline void clr( Register s1, int simm13a);
862 inline void clrx( Register s1, int simm13a);
863
864 // copy & clear upper word
865 inline void clruw( Register s, Register d ) { srl( s, G0, d); }
866 // clear upper word
867 inline void clruwu( Register d ) { srl( d, G0, d); }
868
869 using Assembler::ldsb;
870 using Assembler::ldsh;
871 using Assembler::ldsw;
872 using Assembler::ldub;
873 using Assembler::lduh;
874 using Assembler::lduw;
875 using Assembler::ldx;
876 using Assembler::ldd;
877
878 #ifdef ASSERT
879 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
880 inline void ld(Register s1, ByteSize simm13a, Register d);
881 #endif
882
883 inline void ld(Register s1, Register s2, Register d);
884 inline void ld(Register s1, int simm13a, Register d);
885
886 inline void ldsb(const Address& a, Register d, int offset = 0);
887 inline void ldsh(const Address& a, Register d, int offset = 0);
888 inline void ldsw(const Address& a, Register d, int offset = 0);
889 inline void ldub(const Address& a, Register d, int offset = 0);
890 inline void lduh(const Address& a, Register d, int offset = 0);
891 inline void lduw(const Address& a, Register d, int offset = 0);
892 inline void ldx( const Address& a, Register d, int offset = 0);
893 inline void ld( const Address& a, Register d, int offset = 0);
894 inline void ldd( const Address& a, Register d, int offset = 0);
895
896 inline void ldub(Register s1, RegisterOrConstant s2, Register d );
897 inline void ldsb(Register s1, RegisterOrConstant s2, Register d );
898 inline void lduh(Register s1, RegisterOrConstant s2, Register d );
899 inline void ldsh(Register s1, RegisterOrConstant s2, Register d );
900 inline void lduw(Register s1, RegisterOrConstant s2, Register d );
901 inline void ldsw(Register s1, RegisterOrConstant s2, Register d );
902 inline void ldx( Register s1, RegisterOrConstant s2, Register d );
903 inline void ld( Register s1, RegisterOrConstant s2, Register d );
904 inline void ldd( Register s1, RegisterOrConstant s2, Register d );
905
906 using Assembler::ldf;
907 inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
908 inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
909
910 // membar pseudo instruction; takes the target memory model into account.
911 inline void membar( Assembler::Membar_mask_bits const7a );
912
913 // returns whether membar generates anything.
914 inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );
915
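// Illustrative use (not part of the original header): under TSO most
// orderings are implicit, so the pseudo may emit nothing (see
// membar_has_effect above):
//
//   __ membar(Assembler::StoreLoad);        // e.g. between a store and a later racy load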
916 // mov pseudo instructions
917 inline void mov( Register s, Register d) {
918 if ( s != d ) or3( G0, s, d);
919 else assert_not_delayed(); // Put something useful in the delay slot!
920 }
921
922 inline void mov_or_nop( Register s, Register d) {
923 if ( s != d ) or3( G0, s, d);
924 else nop();
925 }
926
927 inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }
928
929 using Assembler::prefetch;
930 inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);
931
932 using Assembler::stb;
933 using Assembler::sth;
934 using Assembler::stw;
935 using Assembler::stx;
936 using Assembler::std;
937
938 #ifdef ASSERT
939 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
940 inline void st(Register d, Register s1, ByteSize simm13a);
941 #endif
942
943 inline void st(Register d, Register s1, Register s2);
944 inline void st(Register d, Register s1, int simm13a);
945
946 inline void stb(Register d, const Address& a, int offset = 0 );
947 inline void sth(Register d, const Address& a, int offset = 0 );
948 inline void stw(Register d, const Address& a, int offset = 0 );
949 inline void stx(Register d, const Address& a, int offset = 0 );
950 inline void st( Register d, const Address& a, int offset = 0 );
951 inline void std(Register d, const Address& a, int offset = 0 );
952
953 inline void stb(Register d, Register s1, RegisterOrConstant s2 );
954 inline void sth(Register d, Register s1, RegisterOrConstant s2 );
955 inline void stw(Register d, Register s1, RegisterOrConstant s2 );
956 inline void stx(Register d, Register s1, RegisterOrConstant s2 );
957 inline void std(Register d, Register s1, RegisterOrConstant s2 );
958 inline void st( Register d, Register s1, RegisterOrConstant s2 );
959
960 using Assembler::stf;
961 inline void stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
962 inline void stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);
963
964 // Note: offset is added to s2.
965 using Assembler::sub;
966 inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
967
968 using Assembler::swap;
969 inline void swap(Address& a, Register d, int offset = 0);
970
971 // address pseudos: make these names unlike instruction names to avoid confusion
972 inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
973 inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
974 inline void load_bool_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
975 inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
976 inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
977 inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
978 inline void jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
979 inline void jump_to(const AddressLiteral& addrlit, Register temp, int offset = 0);
980 inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
981
982 // ring buffer traceable jumps
983
984 void jmp2( Register r1, Register r2, const char* file, int line );
985 void jmp ( Register r1, int offset, const char* file, int line );
986
987 void jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
988 void jump (const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line);
989
990
991 // argument pseudos:
992
993 inline void load_argument( Argument& a, Register d );
994 inline void store_argument( Register s, Argument& a );
995 inline void store_ptr_argument( Register s, Argument& a );
996 inline void store_float_argument( FloatRegister s, Argument& a );
997 inline void store_double_argument( FloatRegister s, Argument& a );
998 inline void store_long_argument( Register s, Argument& a );
999
1000 // handy macros:
1001
1002 inline void round_to( Register r, int modulus ) {
1003 assert_not_delayed();
1004 inc( r, modulus - 1 );
1005 and3( r, -modulus, r );
1006 }
1007
1008 // --------------------------------------------------
1009
1010 // Functions for isolating 64 bit loads for LP64
1011 // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
1012 // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
1013 inline void ld_ptr(Register s1, Register s2, Register d);
1014 inline void ld_ptr(Register s1, int simm13a, Register d);
1015 inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
1016 inline void ld_ptr(const Address& a, Register d, int offset = 0);
1017 inline void st_ptr(Register d, Register s1, Register s2);
1018 inline void st_ptr(Register d, Register s1, int simm13a);
1019 inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
1020 inline void st_ptr(Register d, const Address& a, int offset = 0);
1021
1022 #ifdef ASSERT
1023 // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
1024 inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
1025 inline void st_ptr(Register d, Register s1, ByteSize simm13a);
1026 #endif
1027
1028 // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
1029 // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
1030 inline void ld_long(Register s1, Register s2, Register d);
1031 inline void ld_long(Register s1, int simm13a, Register d);
1032 inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
1033 inline void ld_long(const Address& a, Register d, int offset = 0);
1034 inline void st_long(Register d, Register s1, Register s2);
1035 inline void st_long(Register d, Register s1, int simm13a);
1036 inline void st_long(Register d, Register s1, RegisterOrConstant s2);
1037 inline void st_long(Register d, const Address& a, int offset = 0);
1038
1039 // Helpers for address formation.
1040 // - They emit only a move if s2 is a constant zero.
1041 // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
1042 // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
1043 RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
1044 RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
1045 RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
1046
1047 RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
1048 if (is_simm13(src.constant_or_zero()))
1049 return src; // register or short constant
1050 guarantee(temp != noreg, "constant offset overflow");
1051 set(src.as_constant(), temp);
1052 return temp;
1053 }
1054
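// Illustrative use (not part of the original header; 'big_offset' is an
// arbitrary example): a constant that may not fit in a simm13 field is
// shunted into the temp register before being used as an address component.
//
//   RegisterOrConstant off = __ ensure_simm13_or_reg(big_offset, G3_scratch);
//   __ ld_ptr(O0, off, G1_scratch);         // ld_ptr(Register, RegisterOrConstant, Register)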
1055 // --------------------------------------------------
1056
1057 public:
1058 // traps as per trap.h (SPARC ABI?)
1059
1060 void breakpoint_trap();
1061 void breakpoint_trap(Condition c, CC cc);
1062 void flush_windows_trap();
1063 void clean_windows_trap();
1064 void get_psr_trap();
1065 void set_psr_trap();
1066
1067 // V8/V9 flush_windows
1068 void flush_windows();
1069
1070 // Support for serializing memory accesses between threads
1071 void serialize_memory(Register thread, Register tmp1, Register tmp2);
1072
1073 // Stack frame creation/removal
1074 void enter();
1075 void leave();
1076
1077 // V8/V9 integer multiply
1078 void mult(Register s1, Register s2, Register d);
1079 void mult(Register s1, int simm13a, Register d);
1080
1081 // V8/V9 read and write of condition codes.
1082 void read_ccr(Register d);
1083 void write_ccr(Register s);
1084
1085 // Manipulation of C++ bools
1086 // These are idioms to flag the need for care when accessing bools, but on
1087 // this platform we assume byte size.
1088
1089 inline void stbool(Register d, const Address& a) { stb(d, a); }
1090 inline void ldbool(const Address& a, Register d) { ldub(a, d); }
1091 inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }
1092
1093 // klass oop manipulations if compressed
1094 void load_klass(Register src_oop, Register klass);
1095 void store_klass(Register klass, Register dst_oop);
1096 void store_klass_gap(Register s, Register dst_oop);
1097
1098 // oop manipulations
1099 void load_heap_oop(const Address& s, Register d);
1100 void load_heap_oop(Register s1, Register s2, Register d);
1101 void load_heap_oop(Register s1, int simm13a, Register d);
1102 void load_heap_oop(Register s1, RegisterOrConstant s2, Register d);
1103 void store_heap_oop(Register d, Register s1, Register s2);
1104 void store_heap_oop(Register d, Register s1, int simm13a);
1105 void store_heap_oop(Register d, const Address& a, int offset = 0);
1106
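// Illustrative use (not part of the original header; the offset '12' is an
// arbitrary example). With compressed oops/klasses these helpers hide the
// encode/decode step:
//
//   __ load_klass(O0, G3_scratch);          // klass of the oop in O0
//   __ load_heap_oop(O0, 12, O1);           // oop field at M[O0 + 12]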
1107 void encode_heap_oop(Register src, Register dst);
1108 void encode_heap_oop(Register r) {
1109 encode_heap_oop(r, r);
1110 }
1111 void decode_heap_oop(Register src, Register dst);
1112 void decode_heap_oop(Register r) {
1113 decode_heap_oop(r, r);
1114 }
1115 void encode_heap_oop_not_null(Register r);
1116 void decode_heap_oop_not_null(Register r);
1117 void encode_heap_oop_not_null(Register src, Register dst);
1118 void decode_heap_oop_not_null(Register src, Register dst);
1119
1120 void encode_klass_not_null(Register r);
1121 void decode_klass_not_null(Register r);
1122 void encode_klass_not_null(Register src, Register dst);
1123 void decode_klass_not_null(Register src, Register dst);
1124
1125 // Support for managing the JavaThread pointer (i.e., the reference to
1126 // thread-local information).
1127 void get_thread(); // load G2_thread
1128 void verify_thread(); // verify G2_thread contents
1129 void save_thread (const Register thread_cache); // save to cache
1130 void restore_thread(const Register thread_cache); // restore from cache
1131
1132 // Support for last Java frame (but use call_VM instead where possible)
1133 void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
1134 void reset_last_Java_frame(void);
1135
1136 // Call into the VM.
1137 // Passes the thread pointer (in O0) as a prepended argument.
1138 // Makes sure oop return values are visible to the GC.
1139 void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
1140 void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
1141 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
1142 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
1143
1144 // these overloadings are not presently used on SPARC:
1145 void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
1146 void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
1147 void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
1148 void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
1149
1150 void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
1151 void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
1152 void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
1153 void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);
1154
1155 void get_vm_result (Register oop_result);
1156 void get_vm_result_2(Register metadata_result);
1157
1158 // the VM result is currently being hijacked for oop preservation
1159 void set_vm_result(Register oop_result);
1160
1161 // Emit the CompiledIC call idiom
1162 void ic_call(address entry, bool emit_delay = true);
1163
1164 // if call_VM_base was called with check_exceptions=false, then call
1165 // check_and_forward_exception to handle exceptions when it is safe
1166 void check_and_forward_exception(Register scratch_reg);
1167
1168 private:
1169 // For V8
1170 void read_ccr_trap(Register ccr_save);
1171 void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
1172
1173 #ifdef ASSERT
1174 // For V8 debugging. Uses a V8 instruction sequence and checks the
1175 // result with the V9 instructions rdccr and wrccr.
1176 // Uses Gscratch and Gscratch2.
1177 void read_ccr_v8_assert(Register ccr_save);
1178 void write_ccr_v8_assert(Register ccr_save);
1179 #endif // ASSERT
1180
1181 public:
1182
1183 // Write to the card table entry for obj - the tmp register is destroyed afterwards.
1184 void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
1185
1186 void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
1187
1188 #ifndef SERIALGC
1189 // General G1 pre-barrier generator.
1190 void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
1191
1192 // General G1 post-barrier generator
1193 void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
1194 #endif // SERIALGC
1195
1196 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
1197 void push_fTOS();
1198
1199 // pops double TOS element from CPU stack and pushes on FPU stack
1200 void pop_fTOS();
1201
1202 void empty_FPU_stack();
1203
1204 void push_IU_state();
1205 void pop_IU_state();
1206
1207 void push_FPU_state();
1208 void pop_FPU_state();
1209
1210 void push_CPU_state();
1211 void pop_CPU_state();
1212
1213 // if the heap base register is used, reinit it with the correct value
1214 void reinit_heapbase();
1215
1216 // Debugging
1217 void _verify_oop(Register reg, const char * msg, const char * file, int line);
1218 void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);
1219
1220 // TODO: verify_method and klass metadata (compare against vptr?)
1221 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
1222 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}
1223
1224 #define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
1225 #define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
1226 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
1227 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
1228
1229 // (the verify_oop macros above only emit code with +VerifyOops)
1230 void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); // only if +VerifyFPU
1231
1232 void stop(const char* msg); // prints msg, dumps registers and stops execution
1233 void warn(const char* msg); // prints msg, but don't stop
1234 void untested(const char* what = "");
1235 void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
1236 void should_not_reach_here() { stop("should not reach here"); }
1237 void print_CPU_state();
1238
1239 // oops in code
1240 AddressLiteral allocate_oop_address(jobject obj); // allocate_index
1241 AddressLiteral constant_oop_address(jobject obj); // find_index
1242 inline void set_oop (jobject obj, Register d); // uses allocate_oop_address
1243 inline void set_oop_constant (jobject obj, Register d); // uses constant_oop_address
1244 inline void set_oop (const AddressLiteral& obj_addr, Register d); // same as load_address
1245
1246 // metadata in code that we have to keep track of
1247 AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
1248 AddressLiteral constant_metadata_address(Metadata* obj); // find_index
1249 inline void set_metadata (Metadata* obj, Register d); // uses allocate_metadata_address
1250 inline void set_metadata_constant (Metadata* obj, Register d); // uses constant_metadata_address
1251 inline void set_metadata (const AddressLiteral& obj_addr, Register d); // same as load_address
1252
1253 void set_narrow_oop( jobject obj, Register d );
1254 void set_narrow_klass( Klass* k, Register d );
1255
1256 // nop padding
1257 void align(int modulus);
1258
1259 // declare a safepoint
1260 void safepoint();
1261
1262 // factor out part of stop into subroutine to save space
1263 void stop_subroutine();
1264 // factor out part of verify_oop into subroutine to save space
1265 void verify_oop_subroutine();
1266
1267 // side-door communication with signalHandler in os_solaris.cpp
1268 static address _verify_oop_implicit_branch[3];
1269
1270 int total_frame_size_in_bytes(int extraWords);
1271
1272 // used when extraWords known statically
1273 void save_frame(int extraWords = 0);
1274 void save_frame_c1(int size_in_bytes);
1275 // make a frame, and simultaneously pass up one or two register values
1276 // into the new register window
1277 void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());
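// A minimal usage sketch (illustrative registers, not from the original source):
//   save_frame_and_mov(0, O1, L1);   // open a new window and carry the caller's O1 into L1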
1278
1279 // given the number of (outgoing) params, calculate the number of words needed on the frame
1280 void calc_mem_param_words(Register Rparam_words, Register Rresult);
1281
1282 // used to calculate frame size dynamically
1283 // result is in bytes and must be negated for the save instruction
1284 void calc_frame_size(Register extraWords, Register resultReg);
1285
1286 // calc and also save
1287 void calc_frame_size_and_save(Register extraWords, Register resultReg);
1288
1289 static void debug(char* msg, RegistersForDebugging* outWindow);
1290
1291 // implementations of bytecodes used by both interpreter and compiler
1292
1293 void lcmp( Register Ra_hi, Register Ra_low,
1294 Register Rb_hi, Register Rb_low,
1295 Register Rresult);
1296
1297 void lneg( Register Rhi, Register Rlow );
1298
1299 void lshl( Register Rin_high, Register Rin_low, Register Rcount,
1300 Register Rout_high, Register Rout_low, Register Rtemp );
1301
1302 void lshr( Register Rin_high, Register Rin_low, Register Rcount,
1303 Register Rout_high, Register Rout_low, Register Rtemp );
1304
1305 void lushr( Register Rin_high, Register Rin_low, Register Rcount,
1306 Register Rout_high, Register Rout_low, Register Rtemp );
1307
1308 #ifdef _LP64
1309 void lcmp( Register Ra, Register Rb, Register Rresult);
1310 #endif
1311
1312 // Load and store values by size and signed-ness
1313 void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
1314 void store_sized_value(Register src, Address dst, size_t size_in_bytes);
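// A minimal usage sketch (hypothetical address and registers):
//   load_sized_value (Address(G3, 0), O0, 2, true);   // sign-extending 16-bit load
//   store_sized_value(O0, Address(G3, 0), 2);          // matching 16-bit store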
1315
1316 void float_cmp( bool is_float, int unordered_result,
1317 FloatRegister Fa, FloatRegister Fb,
1318 Register Rresult);
1319
1320 void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
1321 void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
1322 void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
1323 void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
1324
1325 void save_all_globals_into_locals();
1326 void restore_globals_from_locals();
1327
1328 void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
1329 address lock_addr=0, bool use_call_vm=false);
1330 void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
1331 address lock_addr=0, bool use_call_vm=false);
1332 void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
1333
1334 // These set the icc condition code to equal if the lock succeeded
1335 // and to notEqual if it failed and the slow case is required
1336 void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
1337 Register Rscratch,
1338 BiasedLockingCounters* counters = NULL,
1339 bool try_bias = UseBiasedLocking);
1340 void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
1341 Register Rscratch,
1342 bool try_bias = UseBiasedLocking);
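// A minimal usage sketch (hypothetical registers and label), branching on the
// icc result described above:
//   Label slow_case;
//   compiler_lock_object(O0, O1, O2, O3);
//   br(Assembler::notEqual, false, Assembler::pn, slow_case);  // lock failed -> runtime slow path
//   delayed()->nop();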
1343
1344 // Biased locking support
1345 // Upon entry, lock_reg must point to the lock record on the stack,
1346 // obj_reg must contain the target object, and mark_reg must contain
1347 // the target object's header.
1348 // Destroys mark_reg if an attempt is made to bias an anonymously
1349 // biased lock. In this case a failure will go either to the slow
1350 // case or fall through with the notEqual condition code set with
1351 // the expectation that the slow case in the runtime will be called.
1352 // In the fall-through case where the CAS-based lock is done,
1353 // mark_reg is not destroyed.
1354 void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
1355 Label& done, Label* slow_case = NULL,
1356 BiasedLockingCounters* counters = NULL);
1357 // Upon entry, the base register of mark_addr must contain the oop.
1358 // Destroys temp_reg.
1359
1360 // If allow_delay_slot_filling is set to true, the next instruction
1361 // emitted after this one will go in an annulled delay slot if the
1362 // biased locking exit case failed.
1363 void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);
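// A minimal usage sketch of the biased-locking entry (hypothetical registers and
// labels; the CAS-based locking path is elided):
//   Label done, slow;
//   biased_locking_enter(O0, O1, O2, done, &slow);
//   // ... CAS-based locking, then:
//   bind(done);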
1364
1365 // allocation
1366 void eden_allocate(
1367 Register obj, // result: pointer to object after successful allocation
1368 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
1369 int con_size_in_bytes, // object size in bytes if known at compile time
1370 Register t1, // temp register
1371 Register t2, // temp register
1372 Label& slow_case // continuation point if fast allocation fails
1373 );
1374 void tlab_allocate(
1375 Register obj, // result: pointer to object after successful allocation
1376 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
1377 int con_size_in_bytes, // object size in bytes if known at compile time
1378 Register t1, // temp register
1379 Label& slow_case // continuation point if fast allocation fails
1380 );
1381 void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
1382 void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
1383 Register t1, Register t2);
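// A minimal usage sketch (hypothetical registers, label, and object size):
//   Label slow;
//   tlab_allocate(O0, noreg, 16, G3, slow);  // fixed 16-byte object, result in O0
//   // ... initialize header and fields; 'slow' falls back to eden or the runtime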
1384
1385 // interface method calling
1386 void lookup_interface_method(Register recv_klass,
1387 Register intf_klass,
1388 RegisterOrConstant itable_index,
1389 Register method_result,
1390 Register temp_reg, Register temp2_reg,
1391 Label& no_such_interface);
1392
1393 // virtual method calling
1394 void lookup_virtual_method(Register recv_klass,
1395 RegisterOrConstant vtable_index,
1396 Register method_result);
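// A minimal usage sketch (hypothetical registers and constant index):
//   lookup_virtual_method(G3, RegisterOrConstant(2), G5_method);  // vtable slot 2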
1397
1398 // Test sub_klass against super_klass, with fast and slow paths.
1399
1400 // The fast path produces a tri-state answer: yes / no / maybe-slow.
1401 // One of the three labels can be NULL, meaning take the fall-through.
1402 // If super_check_offset is -1, the value is loaded up from super_klass.
1403 // No registers are killed, except temp_reg and temp2_reg.
1404 // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
1405 void check_klass_subtype_fast_path(Register sub_klass,
1406 Register super_klass,
1407 Register temp_reg,
1408 Register temp2_reg,
1409 Label* L_success,
1410 Label* L_failure,
1411 Label* L_slow_path,
1412 RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
1413
1414 // The rest of the type check; must be wired to a corresponding fast path.
1415 // It does not repeat the fast path logic, so don't use it standalone.
1416 // The temp_reg can be noreg, if no temps are available.
1417 // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
1418 // Updates the sub's secondary super cache as necessary.
1419 void check_klass_subtype_slow_path(Register sub_klass,
1420 Register super_klass,
1421 Register temp_reg,
1422 Register temp2_reg,
1423 Register temp3_reg,
1424 Register temp4_reg,
1425 Label* L_success,
1426 Label* L_failure);
1427
1428 // Simplified, combined version, good for typical uses.
1429 // Falls through on failure.
1430 void check_klass_subtype(Register sub_klass,
1431 Register super_klass,
1432 Register temp_reg,
1433 Register temp2_reg,
1434 Label& L_success);
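// A minimal usage sketch of the combined check (hypothetical registers):
//   Label is_subtype;
//   check_klass_subtype(G3, G4, G1, G5, is_subtype);
//   // fall-through means failure, as noted above
//   bind(is_subtype);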
1435
1436 // method handles (JSR 292)
1437 // offset relative to Gargs of argument at tos[arg_slot].
1438 // (arg_slot == 0 means the last argument, not the first).
1439 RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
1440 Register temp_reg,
1441 int extra_slot_offset = 0);
1442 // Address of Gargs and argument_offset.
1443 Address argument_address(RegisterOrConstant arg_slot,
1444 Register temp_reg = noreg,
1445 int extra_slot_offset = 0);
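// A minimal usage sketch (hypothetical registers): load the last argument
// (arg_slot == 0, see above) from the caller's outgoing area:
//   ld_ptr(argument_address(RegisterOrConstant(0), G3), O0);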
1446
1447 // Stack overflow checking
1448
1449 // Note: this clobbers G3_scratch
1450 void bang_stack_with_offset(int offset) {
1451 // stack grows down; the caller passes a positive offset which is negated here
1452 assert(offset > 0, "must bang with positive offset");
1453 set((-offset)+STACK_BIAS, G3_scratch);
1454 st(G0, SP, G3_scratch);
1455 }
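// Worked example (illustrative numbers, assuming the 64-bit STACK_BIAS of 0x7ff):
// with offset == 0x1000 the store above writes G0 to [SP + 0x7ff - 0x1000],
// i.e. one page below the true stack pointer SP + STACK_BIAS.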
1456
1457 // Writes to successive stack pages until the given offset is reached, to check
1458 // for stack overflow plus the shadow pages. Clobbers the Rtsp and Rscratch registers.
1459 void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);
1460
1461 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
1462
1463 void verify_tlab();
1464
1465 Condition negate_condition(Condition cond);
1466
1467 // Helper functions for statistics gathering.
1468 // Conditionally (non-atomically) increments the counter at the passed address, preserving condition codes.
1469 void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
1470 // Unconditional increment.
1471 void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
1472 void inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2);
1473
1474 // Compare char[] arrays aligned to 4 bytes.
1475 void char_arrays_equals(Register ary1, Register ary2,
1476 Register limit, Register result,
1477 Register chr1, Register chr2, Label& Ldone);
1478 // Use BIS for zeroing
1479 void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);
1480
1481 #undef VIRTUAL
1482 };
1483
1484 /**
1485 * class SkipIfEqual:
1486 *
1487 * Instantiating this class emits assembly code that jumps around any code
1488 * emitted between the creation of the instance and its automatic destruction
1489 * at the end of the enclosing scope, depending on the run-time value of the
1490 * flag passed to the constructor.
1491 */
1492 class SkipIfEqual : public StackObj {
1493 private:
1494 MacroAssembler* _masm;
1495 Label _label;
1496
1497 public:
1498 // 'temp' is a temp register that this object can use (and trash)
1499 SkipIfEqual(MacroAssembler*, Register temp,
1500 const bool* flag_addr, Assembler::Condition condition);
1501 ~SkipIfEqual();
1502 };
1503
1504 #endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP