/*
 * Copyright 2000-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler.cpp.incl"

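// Pad the instruction stream after the patch site until there is room
// for a native call to be written over it, then install the patching
// stub and queue it with the other slow-case stubs.  The ASSERT block
// below cross-checks that the bytecode which triggered patching is one
// that can actually require it: field accesses for access_field stubs;
// allocation, type checks and class-constant loads for load_klass stubs.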
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}


//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}

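// Bail out of the compilation when fewer than 1K bytes remain in the
// current code section.  This runs once per LIR op and per stub (see
// emit_lir_list and emit_stubs), so the 1K slack presumably covers the
// most code any single op can emit between checks.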
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(1*K)) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

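// Emit every stub in stub_list, re-checking code-buffer space and the
// bailout flag before each one.  With CommentedAssembly set, each stub
// is preceded by a "<name> slow case" block comment; debug builds also
// assert that no stub leaves labels unbound.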
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

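// For each handler of each CodeEmitInfo, emit the handler's entry code
// (the "exception adapter block") if it contains more than the final
// branch, and record the PC offset at which the handler is entered.
// Handlers whose entry code is trivial reuse the handler block's own
// exception_handler_pco.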
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}

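// Emit one basic block: align it if it is a backward-branch target,
// record its PC offset if it starts an exception handler, then emit
// its LIR.  The x86-only asserts check that no op changed the fixed
// frame size.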
void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}

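// Run the peephole optimizer over the list, then emit each op in turn,
// checking code-buffer space first and, when the recorder is tracking
// non-safepoints, processing debug info afterwards.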
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op, since that's too verbose.  Do print
      // branches, since they include block and stub names, and
      // patching moves, since they generate funny-looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->lock_stack();
}

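// Track debug info for the op just emitted.  Ops coming from the same
// source instruction, or from sources with the same bci and identical
// state, merely advance the pending record's PC offset; anything else
// flushes the pending record via record_non_safepoint_debug_info() and
// starts a fresh one if this offset is new to the recorder.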
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // There is pending debug info; coalesce with it if possible,
    // otherwise flush it.
    if (_pending_non_safepoint->bci() == src->bci() &&
        debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns, in bci_result, the caller_bci for the next-younger
// state (i.e. the bci within the returned state's method at which its
// callee is invoked).
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = s->scope()->caller_bci();
    s = s->caller_state();
  }
}

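// Flush the pending non-safepoint record: open a non-safepoint entry
// at the pending PC offset, describe every scope from the oldest
// inlining level to the youngest, then close the entry.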
void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = _pending_non_safepoint->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    debug_info->describe_scope(pc_offset, scope->method(), s_bci);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

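// Emit a Java call: verify the oop map, align the call site on MP
// systems so it can be patched atomically, emit the static call stub
// out of line, then dispatch on the call kind.  On x86 with TIERED,
// non-SSE code must also clean up the FPU stack, which C2-compiled
// callees may leave dirty.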
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
    case lir_static_call:
      call(op->addr(), relocInfo::static_call_type, op->info());
      break;
    case lir_optvirtual_call:
      call(op->addr(), relocInfo::opt_virtual_call_type, op->info());
      break;
    case lir_icvirtual_call:
      ic_call(op->addr(), op->info());
      break;
    case lir_virtual_call:
      vtable_call(op->vtable_offset(), op->info());
      break;
    default: ShouldNotReachHere();
  }
#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}

void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}

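// Dispatch a one-operand LIR op to the platform back end: moves
// (volatile, patched or plain), prefetches, FPU stack manipulation,
// returns, safepoint polls, null checks and the rest.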
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

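// Dispatch a zero-operand LIR op: word alignment, nops, frame
// building, the standard and OSR entry points, FPU mode changes,
// breakpoints, memory barriers and reading the current thread.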
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}

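// Dispatch a two-operand LIR op: comparisons (with an implicit null
// check when an address operand carries CodeEmitInfo), conditional
// moves, shifts, arithmetic, math intrinsics, logic ops and
// throw/unwind.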
void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
    case lir_unwind:
      throw_op(op->in_opr1(), op->in_opr2(), op->info(), op->code() == lir_unwind);
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}

void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}

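// Central move dispatcher: choose the platform-specific move routine
// from the kinds of source (register, stack slot, constant, address)
// and destination.  The asserts pin down which combinations may carry
// a patch code or CodeEmitInfo.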
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, unaligned);

  } else {
    ShouldNotReachHere();
  }
}

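// Debug-only sanity check: when VerifyOopMaps or VerifyOops is set,
// temporarily force VerifyOops and verify every oop named in the oop
// map, whether it lives in a register or a stack slot.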
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      s.next();
    }
    VerifyOops = v;
  }
#endif
}