Mercurial > hg > truffle
comparison src/cpu/ppc/vm/macroAssembler_ppc.hpp @ 14408:ec28f9c041ff
8019972: PPC64 (part 9): platform files for interpreter only VM.
Summary: With this change the HotSpot core build works on Linux/PPC64. The VM successfully executes simple test programs.
Reviewed-by: kvn
author | goetz |
---|---|
date | Fri, 02 Aug 2013 16:46:45 +0200 |
parents | |
children | eb178e97560c |
comparison
equal
deleted
inserted
replaced
14407:94c202aa2646 | 14408:ec28f9c041ff |
---|---|
1 /* | |
2 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. | |
3 * Copyright 2012, 2013 SAP AG. All rights reserved. | |
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
5 * | |
6 * This code is free software; you can redistribute it and/or modify it | |
7 * under the terms of the GNU General Public License version 2 only, as | |
8 * published by the Free Software Foundation. | |
9 * | |
10 * This code is distributed in the hope that it will be useful, but WITHOUT | |
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
13 * version 2 for more details (a copy is included in the LICENSE file that | |
14 * accompanied this code). | |
15 * | |
16 * You should have received a copy of the GNU General Public License version | |
17 * 2 along with this work; if not, write to the Free Software Foundation, | |
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. | |
19 * | |
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA | |
21 * or visit www.oracle.com if you need additional information or have any | |
22 * questions. | |
23 * | |
24 */ | |
25 | |
26 #ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP | |
27 #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP | |
28 | |
29 #include "asm/assembler.hpp" | |
30 | |
31 // MacroAssembler extends Assembler by a few frequently used macros. | |
32 | |
33 class ciTypeArray; | |
34 | |
35 class MacroAssembler: public Assembler { | |
36 public: | |
37 MacroAssembler(CodeBuffer* code) : Assembler(code) {} | |
38 | |
39 // | |
40 // Optimized instruction emitters | |
41 // | |
42 | |
43 inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; } | |
44 inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); } | |
45 | |
46 // load d = *[a+si31] | |
47 // Emits several instructions if the offset is not encodable in one instruction. | |
48 void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop); | |
49 void ld_largeoffset (Register d, int si31, Register a, int emit_filler_nop); | |
50 inline static bool is_ld_largeoffset(address a); | |
51 inline static int get_ld_largeoffset_offset(address a); | |
52 | |
53 inline void round_to(Register r, int modulus); | |
54 | |
55 // Load/store with type given by parameter. | |
56 void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed); | |
57 void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes); | |
58 | |
59 // Move register if destination register and target register are different | |
60 inline void mr_if_needed(Register rd, Register rs); | |
61 | |
62 // nop padding | |
63 void align(int modulus); | |
64 | |
65 // | |
66 // Constants, loading constants, TOC support | |
67 // | |
68 | |
69 // Address of the global TOC. | |
70 inline static address global_toc(); | |
71 // Offset of given address to the global TOC. | |
72 inline static int offset_to_global_toc(const address addr); | |
73 | |
74 // Address of TOC of the current method. | |
75 inline address method_toc(); | |
76 // Offset of given address to TOC of the current method. | |
77 inline int offset_to_method_toc(const address addr); | |
78 | |
79 // Global TOC. | |
80 void calculate_address_from_global_toc(Register dst, address addr, | |
81 bool hi16 = true, bool lo16 = true, | |
82 bool add_relocation = true, bool emit_dummy_addr = false); | |
83 inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) { | |
84 calculate_address_from_global_toc(dst, addr, true, false); | |
85 }; | |
86 inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) { | |
87 calculate_address_from_global_toc(dst, addr, false, true); | |
88 }; | |
89 | |
90 inline static bool is_calculate_address_from_global_toc_at(address a, address bound); | |
91 static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound); | |
92 static address get_address_of_calculate_address_from_global_toc_at(address a, address addr); | |
93 | |
94 #ifdef _LP64 | |
95 // Patch narrow oop constant. | |
96 inline static bool is_set_narrow_oop(address a, address bound); | |
97 static int patch_set_narrow_oop(address a, address bound, narrowOop data); | |
98 static narrowOop get_narrow_oop(address a, address bound); | |
99 #endif | |
100 | |
101 inline static bool is_load_const_at(address a); | |
102 | |
103 // Emits an oop const to the constant pool, loads the constant, and | |
104 // sets a relocation info with address current_pc. | |
105 void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc); | |
106 void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) { | |
107 assert(dst == R2_TOC, "base register must be TOC"); | |
108 load_const_from_method_toc(dst, a, toc); | |
109 } | |
110 | |
111 static bool is_load_const_from_method_toc_at(address a); | |
112 static int get_offset_of_load_const_from_method_toc_at(address a); | |
113 | |
114 // Get the 64 bit constant from a `load_const' sequence. | |
115 static long get_const(address load_const); | |
116 | |
117 // Patch the 64 bit constant of a `load_const' sequence. This is a | |
118 // low level procedure. It neither flushes the instruction cache nor | |
119 // is it atomic. | |
120 static void patch_const(address load_const, long x); | |
121 | |
122 // Metadata in code that we have to keep track of. | |
123 AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index | |
124 AddressLiteral constant_metadata_address(Metadata* obj); // find_index | |
125 // Oops used directly in compiled code are stored in the constant pool, | |
126 // and loaded from there. | |
127 // Allocate new entry for oop in constant pool. Generate relocation. | |
128 AddressLiteral allocate_oop_address(jobject obj); | |
129 // Find oop obj in constant pool. Return relocation with it's index. | |
130 AddressLiteral constant_oop_address(jobject obj); | |
131 | |
132 // Find oop in constant pool and emit instructions to load it. | |
133 // Uses constant_oop_address. | |
134 inline void set_oop_constant(jobject obj, Register d); | |
135 // Same as load_address. | |
136 inline void set_oop (AddressLiteral obj_addr, Register d); | |
137 | |
138 // Read runtime constant: Issue load if constant not yet established, | |
139 // else use real constant. | |
140 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, | |
141 Register tmp, | |
142 int offset); | |
143 | |
144 // | |
145 // branch, jump | |
146 // | |
147 | |
148 inline void pd_patch_instruction(address branch, address target); | |
149 NOT_PRODUCT(static void pd_print_patched_instruction(address branch);) | |
150 | |
151 // Conditional far branch for destinations encodable in 24+2 bits. | |
152 // Same interface as bc, e.g. no inverse boint-field. | |
153 enum { | |
154 bc_far_optimize_not = 0, | |
155 bc_far_optimize_on_relocate = 1 | |
156 }; | |
157 // optimize: flag for telling the conditional far branch to optimize | |
158 // itself when relocated. | |
159 void bc_far(int boint, int biint, Label& dest, int optimize); | |
160 // Relocation of conditional far branches. | |
161 static bool is_bc_far_at(address instruction_addr); | |
162 static address get_dest_of_bc_far_at(address instruction_addr); | |
163 static void set_dest_of_bc_far_at(address instruction_addr, address dest); | |
164 private: | |
165 static bool inline is_bc_far_variant1_at(address instruction_addr); | |
166 static bool inline is_bc_far_variant2_at(address instruction_addr); | |
167 static bool inline is_bc_far_variant3_at(address instruction_addr); | |
168 public: | |
169 | |
170 // Convenience bc_far versions. | |
171 inline void blt_far(ConditionRegister crx, Label& L, int optimize); | |
172 inline void bgt_far(ConditionRegister crx, Label& L, int optimize); | |
173 inline void beq_far(ConditionRegister crx, Label& L, int optimize); | |
174 inline void bso_far(ConditionRegister crx, Label& L, int optimize); | |
175 inline void bge_far(ConditionRegister crx, Label& L, int optimize); | |
176 inline void ble_far(ConditionRegister crx, Label& L, int optimize); | |
177 inline void bne_far(ConditionRegister crx, Label& L, int optimize); | |
178 inline void bns_far(ConditionRegister crx, Label& L, int optimize); | |
179 | |
180 // Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump. | |
181 private: | |
182 enum { | |
183 bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*5load_const*/ + 1/*mtctr*/ + 1/*bctrl*/), | |
184 bxx64_patchable_size = bxx64_patchable_instruction_count * BytesPerInstWord, | |
185 bxx64_patchable_ret_addr_offset = bxx64_patchable_size | |
186 }; | |
187 void bxx64_patchable(address target, relocInfo::relocType rt, bool link); | |
188 static bool is_bxx64_patchable_at( address instruction_addr, bool link); | |
189 // Does the instruction use a pc-relative encoding of the destination? | |
190 static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link); | |
191 static bool is_bxx64_patchable_variant1_at( address instruction_addr, bool link); | |
192 // Load destination relative to global toc. | |
193 static bool is_bxx64_patchable_variant1b_at( address instruction_addr, bool link); | |
194 static bool is_bxx64_patchable_variant2_at( address instruction_addr, bool link); | |
195 static void set_dest_of_bxx64_patchable_at( address instruction_addr, address target, bool link); | |
196 static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link); | |
197 | |
198 public: | |
199 // call | |
200 enum { | |
201 bl64_patchable_instruction_count = bxx64_patchable_instruction_count, | |
202 bl64_patchable_size = bxx64_patchable_size, | |
203 bl64_patchable_ret_addr_offset = bxx64_patchable_ret_addr_offset | |
204 }; | |
205 inline void bl64_patchable(address target, relocInfo::relocType rt) { | |
206 bxx64_patchable(target, rt, /*link=*/true); | |
207 } | |
208 inline static bool is_bl64_patchable_at(address instruction_addr) { | |
209 return is_bxx64_patchable_at(instruction_addr, /*link=*/true); | |
210 } | |
211 inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) { | |
212 return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true); | |
213 } | |
214 inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) { | |
215 set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true); | |
216 } | |
217 inline static address get_dest_of_bl64_patchable_at(address instruction_addr) { | |
218 return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true); | |
219 } | |
220 // jump | |
221 enum { | |
222 b64_patchable_instruction_count = bxx64_patchable_instruction_count, | |
223 b64_patchable_size = bxx64_patchable_size, | |
224 }; | |
225 inline void b64_patchable(address target, relocInfo::relocType rt) { | |
226 bxx64_patchable(target, rt, /*link=*/false); | |
227 } | |
228 inline static bool is_b64_patchable_at(address instruction_addr) { | |
229 return is_bxx64_patchable_at(instruction_addr, /*link=*/false); | |
230 } | |
231 inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) { | |
232 return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false); | |
233 } | |
234 inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) { | |
235 set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false); | |
236 } | |
237 inline static address get_dest_of_b64_patchable_at(address instruction_addr) { | |
238 return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false); | |
239 } | |
240 | |
241 // | |
242 // Support for frame handling | |
243 // | |
244 | |
245 // some ABI-related functions | |
246 void save_nonvolatile_gprs( Register dst_base, int offset); | |
247 void restore_nonvolatile_gprs(Register src_base, int offset); | |
248 void save_volatile_gprs( Register dst_base, int offset); | |
249 void restore_volatile_gprs(Register src_base, int offset); | |
250 void save_LR_CR( Register tmp); // tmp contains LR on return. | |
251 void restore_LR_CR(Register tmp); | |
252 | |
253 // Get current PC using bl-next-instruction trick. | |
254 address get_PC_trash_LR(Register result); | |
255 | |
256 // Resize current frame either relatively wrt to current SP or absolute. | |
257 void resize_frame(Register offset, Register tmp); | |
258 void resize_frame(int offset, Register tmp); | |
259 void resize_frame_absolute(Register addr, Register tmp1, Register tmp2); | |
260 | |
261 // Push a frame of size bytes. | |
262 void push_frame(Register bytes, Register tmp); | |
263 | |
264 // Push a frame of size `bytes'. No abi space provided. | |
265 void push_frame(unsigned int bytes, Register tmp); | |
266 | |
267 // Push a frame of size `bytes' plus abi112 on top. | |
268 void push_frame_abi112(unsigned int bytes, Register tmp); | |
269 | |
270 // Setup up a new C frame with a spill area for non-volatile GPRs and additional | |
271 // space for local variables | |
272 void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp); | |
273 | |
274 // pop current C frame | |
275 void pop_frame(); | |
276 | |
277 // | |
278 // Calls | |
279 // | |
280 | |
281 private: | |
282 address _last_calls_return_pc; | |
283 | |
284 // Generic version of a call to C function via a function descriptor | |
285 // with variable support for C calling conventions (TOC, ENV, etc.). | |
286 // updates and returns _last_calls_return_pc. | |
287 address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call, | |
288 bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee); | |
289 | |
290 public: | |
291 | |
292 // Get the pc where the last call will return to. returns _last_calls_return_pc. | |
293 inline address last_calls_return_pc(); | |
294 | |
295 // Call a C function via a function descriptor and use full C | |
296 // calling conventions. Updates and returns _last_calls_return_pc. | |
297 address call_c(Register function_descriptor); | |
298 address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt); | |
299 address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt, | |
300 Register toc); | |
301 | |
302 protected: | |
303 | |
304 // It is imperative that all calls into the VM are handled via the | |
305 // call_VM macros. They make sure that the stack linkage is setup | |
306 // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points | |
307 // while call_VM_leaf's correspond to LEAF entry points. | |
308 // | |
309 // This is the base routine called by the different versions of | |
310 // call_VM. The interpreter may customize this version by overriding | |
311 // it for its purposes (e.g., to save/restore additional registers | |
312 // when doing a VM call). | |
313 // | |
314 // If no last_java_sp is specified (noreg) then SP will be used instead. | |
315 virtual void call_VM_base( | |
316 // where an oop-result ends up if any; use noreg otherwise | |
317 Register oop_result, | |
318 // to set up last_Java_frame in stubs; use noreg otherwise | |
319 Register last_java_sp, | |
320 // the entry point | |
321 address entry_point, | |
322 // flag which indicates if exception should be checked | |
323 bool check_exception=true | |
324 ); | |
325 | |
326 // Support for VM calls. This is the base routine called by the | |
327 // different versions of call_VM_leaf. The interpreter may customize | |
328 // this version by overriding it for its purposes (e.g., to | |
329 // save/restore additional registers when doing a VM call). | |
330 void call_VM_leaf_base(address entry_point); | |
331 | |
332 public: | |
333 // Call into the VM. | |
334 // Passes the thread pointer (in R3_ARG1) as a prepended argument. | |
335 // Makes sure oop return values are visible to the GC. | |
336 void call_VM(Register oop_result, address entry_point, bool check_exceptions = true); | |
337 void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true); | |
338 void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); | |
339 void call_VM_leaf(address entry_point); | |
340 void call_VM_leaf(address entry_point, Register arg_1); | |
341 void call_VM_leaf(address entry_point, Register arg_1, Register arg_2); | |
342 void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); | |
343 | |
344 // Call a stub function via a function descriptor, but don't save | |
345 // TOC before call, don't setup TOC and ENV for call, and don't | |
346 // restore TOC after call. Updates and returns _last_calls_return_pc. | |
347 inline address call_stub(Register function_entry); | |
348 inline void call_stub_and_return_to(Register function_entry, Register return_pc); | |
349 | |
350 // | |
351 // Java utilities | |
352 // | |
353 | |
354 // Read from the polling page, its address is already in a register. | |
355 inline void load_from_polling_page(Register polling_page_address, int offset = 0); | |
356 // Check whether instruction is a read access to the polling page | |
357 // which was emitted by load_from_polling_page(..). | |
358 static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/, | |
359 address* polling_address_ptr = NULL); | |
360 | |
361 // Check whether instruction is a write access to the memory | |
362 // serialization page realized by one of the instructions stw, stwu, | |
363 // stwx, or stwux. | |
364 static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext); | |
365 | |
366 // Support for NULL-checks | |
367 // | |
368 // Generates code that causes a NULL OS exception if the content of reg is NULL. | |
369 // If the accessed location is M[reg + offset] and the offset is known, provide the | |
370 // offset. No explicit code generation is needed if the offset is within a certain | |
371 // range (0 <= offset <= page_size). | |
372 | |
373 // Stack overflow checking | |
374 void bang_stack_with_offset(int offset); | |
375 | |
376 // If instruction is a stack bang of the form ld, stdu, or | |
377 // stdux, return the banged address. Otherwise, return 0. | |
378 static address get_stack_bang_address(int instruction, void* ucontext); | |
379 | |
380 // Atomics | |
381 // CmpxchgX sets condition register to cmpX(current, compare). | |
382 // (flag == ne) => (dest_current_value != compare_value), (!swapped) | |
383 // (flag == eq) => (dest_current_value == compare_value), ( swapped) | |
384 static inline bool cmpxchgx_hint_acquire_lock() { return true; } | |
385 // The stxcx will probably not be succeeded by a releasing store. | |
386 static inline bool cmpxchgx_hint_release_lock() { return false; } | |
387 static inline bool cmpxchgx_hint_atomic_update() { return false; } | |
388 | |
389 // Cmpxchg semantics | |
390 enum { | |
391 MemBarNone = 0, | |
392 MemBarRel = 1, | |
393 MemBarAcq = 2, | |
394 MemBarFenceAfter = 4 // use powers of 2 | |
395 }; | |
396 void cmpxchgw(ConditionRegister flag, | |
397 Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base, | |
398 int semantics, bool cmpxchgx_hint = false, | |
399 Register int_flag_success = noreg, bool contention_hint = false); | |
400 void cmpxchgd(ConditionRegister flag, | |
401 Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base, | |
402 int semantics, bool cmpxchgx_hint = false, | |
403 Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false); | |
404 | |
405 // interface method calling | |
406 void lookup_interface_method(Register recv_klass, | |
407 Register intf_klass, | |
408 RegisterOrConstant itable_index, | |
409 Register method_result, | |
410 Register temp_reg, Register temp2_reg, | |
411 Label& no_such_interface); | |
412 | |
413 // virtual method calling | |
414 void lookup_virtual_method(Register recv_klass, | |
415 RegisterOrConstant vtable_index, | |
416 Register method_result); | |
417 | |
418 // Test sub_klass against super_klass, with fast and slow paths. | |
419 | |
420 // The fast path produces a tri-state answer: yes / no / maybe-slow. | |
421 // One of the three labels can be NULL, meaning take the fall-through. | |
422 // If super_check_offset is -1, the value is loaded up from super_klass. | |
423 // No registers are killed, except temp_reg and temp2_reg. | |
424 // If super_check_offset is not -1, temp2_reg is not used and can be noreg. | |
425 void check_klass_subtype_fast_path(Register sub_klass, | |
426 Register super_klass, | |
427 Register temp1_reg, | |
428 Register temp2_reg, | |
429 Label& L_success, | |
430 Label& L_failure); | |
431 | |
432 // The rest of the type check; must be wired to a corresponding fast path. | |
433 // It does not repeat the fast path logic, so don't use it standalone. | |
434 // The temp_reg can be noreg, if no temps are available. | |
435 // It can also be sub_klass or super_klass, meaning it's OK to kill that one. | |
436 // Updates the sub's secondary super cache as necessary. | |
437 void check_klass_subtype_slow_path(Register sub_klass, | |
438 Register super_klass, | |
439 Register temp1_reg, | |
440 Register temp2_reg, | |
441 Label* L_success = NULL, | |
442 Register result_reg = noreg); | |
443 | |
444 // Simplified, combined version, good for typical uses. | |
445 // Falls through on failure. | |
446 void check_klass_subtype(Register sub_klass, | |
447 Register super_klass, | |
448 Register temp1_reg, | |
449 Register temp2_reg, | |
450 Label& L_success); | |
451 | |
452 // Method handle support (JSR 292). | |
453 void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type); | |
454 | |
455 RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0); | |
456 | |
457 // Biased locking support | |
458 // Upon entry,obj_reg must contain the target object, and mark_reg | |
459 // must contain the target object's header. | |
460 // Destroys mark_reg if an attempt is made to bias an anonymously | |
461 // biased lock. In this case a failure will go either to the slow | |
462 // case or fall through with the notEqual condition code set with | |
463 // the expectation that the slow case in the runtime will be called. | |
464 // In the fall-through case where the CAS-based lock is done, | |
465 // mark_reg is not destroyed. | |
466 void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg, | |
467 Register temp2_reg, Label& done, Label* slow_case = NULL); | |
468 // Upon entry, the base register of mark_addr must contain the oop. | |
469 // Destroys temp_reg. | |
470 // If allow_delay_slot_filling is set to true, the next instruction | |
471 // emitted after this one will go in an annulled delay slot if the | |
472 // biased locking exit case failed. | |
473 void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done); | |
474 | |
475 void compiler_fast_lock_object( ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3); | |
476 void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3); | |
477 | |
478 // Support for serializing memory accesses between threads | |
479 void serialize_memory(Register thread, Register tmp1, Register tmp2); | |
480 | |
481 // GC barrier support. | |
482 void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp); | |
483 void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj); | |
484 | |
485 #ifndef SERIALGC | |
486 // General G1 pre-barrier generator. | |
487 void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val, | |
488 Register Rtmp1, Register Rtmp2, bool needs_frame = false); | |
489 // General G1 post-barrier generator | |
490 void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, | |
491 Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL); | |
492 #endif // SERIALGC | |
493 | |
494 // Support for managing the JavaThread pointer (i.e.; the reference to | |
495 // thread-local information). | |
496 | |
497 // Support for last Java frame (but use call_VM instead where possible): | |
498 // access R16_thread->last_Java_sp. | |
499 void set_last_Java_frame(Register last_java_sp, Register last_Java_pc); | |
500 void reset_last_Java_frame(void); | |
501 void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1); | |
502 | |
503 // Read vm result from thread: oop_result = R16_thread->result; | |
504 void get_vm_result (Register oop_result); | |
505 void get_vm_result_2(Register metadata_result); | |
506 | |
507 static bool needs_explicit_null_check(intptr_t offset); | |
508 | |
509 // Trap-instruction-based checks. | |
510 // Range checks can be distinguished from zero checks as they check 32 bit, | |
511 // zero checks all 64 bits (tw, td). | |
512 inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual); | |
513 static bool is_trap_null_check(int x) { | |
514 return is_tdi(x, traptoEqual, -1/*any reg*/, 0) || | |
515 is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0); | |
516 } | |
517 | |
518 inline void trap_zombie_not_entrant(); | |
519 static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); } | |
520 | |
521 inline void trap_should_not_reach_here(); | |
522 static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); } | |
523 | |
524 inline void trap_ic_miss_check(Register a, Register b); | |
525 static bool is_trap_ic_miss_check(int x) { | |
526 return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/); | |
527 } | |
528 | |
529 // Implicit or explicit null check, jumps to static address exception_entry. | |
530 inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry); | |
531 | |
532 // Check accessed object for null. Use SIGTRAP-based null checks on AIX. | |
533 inline void ld_with_trap_null_check(Register d, int si16, Register s1); | |
534 // Variant for heap OOPs including decompression of compressed OOPs. | |
535 inline void load_heap_oop_with_trap_null_check(Register d, RegisterOrConstant offs, Register s1); | |
536 | |
537 // Load heap oop and decompress. Loaded oop may not be null. | |
538 inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg); | |
539 | |
540 // Null allowed. | |
541 inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg); | |
542 | |
543 // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong. | |
544 inline void encode_heap_oop_not_null(Register d); | |
545 inline void decode_heap_oop_not_null(Register d); | |
546 | |
547 // Null allowed. | |
548 inline void decode_heap_oop(Register d); | |
549 | |
550 // Load/Store klass oop from klass field. Compress. | |
551 void load_klass(Register dst, Register src); | |
552 void load_klass_with_trap_null_check(Register dst, Register src); | |
553 void store_klass(Register dst_oop, Register klass, Register tmp = R0); | |
554 void decode_klass_not_null(Register dst, Register src = noreg); | |
555 void encode_klass_not_null(Register dst, Register src = noreg); | |
556 | |
557 // Load common heap base into register. | |
558 void reinit_heapbase(Register d, Register tmp = noreg); | |
559 | |
560 // SIGTRAP-based range checks for arrays. | |
561 inline void trap_range_check_l(Register a, Register b); | |
562 inline void trap_range_check_l(Register a, int si16); | |
563 static bool is_trap_range_check_l(int x) { | |
564 return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) || | |
565 is_twi(x, traptoLessThanUnsigned, -1/*any reg*/) ); | |
566 } | |
567 inline void trap_range_check_le(Register a, int si16); | |
568 static bool is_trap_range_check_le(int x) { | |
569 return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/); | |
570 } | |
571 inline void trap_range_check_g(Register a, int si16); | |
572 static bool is_trap_range_check_g(int x) { | |
573 return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/); | |
574 } | |
575 inline void trap_range_check_ge(Register a, Register b); | |
576 inline void trap_range_check_ge(Register a, int si16); | |
577 static bool is_trap_range_check_ge(int x) { | |
578 return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) || | |
579 is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/) ); | |
580 } | |
581 static bool is_trap_range_check(int x) { | |
582 return is_trap_range_check_l(x) || is_trap_range_check_le(x) || | |
583 is_trap_range_check_g(x) || is_trap_range_check_ge(x); | |
584 } | |
585 | |
586 // Needle of length 1. | |
587 void string_indexof_1(Register result, Register haystack, Register haycnt, | |
588 Register needle, jchar needleChar, | |
589 Register tmp1, Register tmp2); | |
590 // General indexof, eventually with constant needle length. | |
591 void string_indexof(Register result, Register haystack, Register haycnt, | |
592 Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval, | |
593 Register tmp1, Register tmp2, Register tmp3, Register tmp4); | |
594 void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg, | |
595 Register result_reg, Register tmp_reg); | |
596 void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg, | |
597 Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg, | |
598 Register tmp5_reg); | |
599 void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg, | |
600 Register tmp1_reg, Register tmp2_reg); | |
601 | |
602 // | |
603 // Debugging | |
604 // | |
605 | |
606 // assert on cr0 | |
607 void asm_assert(bool check_equal, const char* msg, int id); | |
608 void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); } | |
609 void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); } | |
610 | |
611 private: | |
612 void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base, | |
613 const char* msg, int id); | |
614 | |
615 public: | |
616 | |
617 void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) { | |
618 asm_assert_mems_zero(true, 8, mem_offset, mem_base, msg, id); | |
619 } | |
620 void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) { | |
621 asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id); | |
622 } | |
623 | |
624 // Verify R16_thread contents. | |
625 void verify_thread(); | |
626 | |
627 // Emit code to verify that reg contains a valid oop if +VerifyOops is set. | |
628 void verify_oop(Register reg, const char* s = "broken oop"); | |
629 | |
630 // TODO: verify method and klass metadata (compare against vptr?) | |
631 void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} | |
632 void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){} | |
633 | |
634 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__) | |
635 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__) | |
636 | |
637 private: | |
638 | |
639 enum { | |
640 stop_stop = 0, | |
641 stop_untested = 1, | |
642 stop_unimplemented = 2, | |
643 stop_shouldnotreachhere = 3, | |
644 stop_end = 4 | |
645 }; | |
646 void stop(int type, const char* msg, int id); | |
647 | |
648 public: | |
649 // Prints msg, dumps registers and stops execution. | |
650 void stop (const char* msg = "", int id = 0) { stop(stop_stop, msg, id); } | |
651 void untested (const char* msg = "", int id = 0) { stop(stop_untested, msg, id); } | |
652 void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); } | |
653 void should_not_reach_here() { stop(stop_shouldnotreachhere, "", -1); } | |
654 | |
655 void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN; | |
656 }; | |
657 | |
658 #endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP |