src/share/vm/c1/c1_LIRAssembler.cpp @ 0:a61af66fc99e (jdk7-b24)

Initial load
author duke
date Sat, 01 Dec 2007 00:00:00 +0000
parents (none)
children dc7f315e41f7 37f87013dfd8
/*
 * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler.cpp.incl"

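// Called after emitting code that may require runtime patching.  The nop
// padding below guarantees the patch site spans at least
// NativeCall::instruction_size bytes, so the beginning of the site can later
// be overwritten safely when the site is patched.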
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}


//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}

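// LIR assembly emits into a fixed-size CodeBuffer; rather than risk
// overflowing it, bail out of the compilation once less than about 1K of
// space remains in the current code section.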
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(1*K)) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}

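// Only instance methods need an inline cache check at the (unverified)
// entry point; static calls are bound to the method directly, so there is
// no receiver klass to validate.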
bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

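// Emit the deferred entry code ("exception adapter blocks") for exception
// handlers and record each handler's pc offset (pco) for later construction
// of the ExceptionHandlerTable.  A handler whose entry code is more than a
// single branch gets that code emitted out of line here; otherwise the
// offset recorded for the handler block itself is reused.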
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op, since that's too verbose.  Do print
      // branches, since they include block and stub names, and
      // patching moves, since they generate funny-looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

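// Non-safepoint debug information: when the recorder supports it, a
// pc -> bci mapping is also recorded for instructions that are not
// safepoints, which makes debugging and profiling information more precise.
// The helper below extracts the JVM state an instruction carries, if any.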
static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->lock_stack();
}

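// Called for every emitted op when recording non-safepoints.  The latest
// instruction with usable state is kept as a "pending" non-safepoint; ops
// from the same source, or with an identical bci and state, merely advance
// the pending offset, and a record is only flushed out once a genuinely
// different state appears.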
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // There is pending debug info from an older instruction.  Extend it if
    // it describes the same bci and state; otherwise flush it.
    if (_pending_non_safepoint->bci() == src->bci() &&
        debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Also returns, via bci_result, the caller bci of the next-younger state.
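// For example (assuming an inlining chain a() -> b() -> c(), with s being
// c's state): nth_oldest(s, 0, bci) returns a's state and sets bci to the
// call site of b within a; nth_oldest(s, 2, bci) returns c's state and
// leaves bci at the value passed in; nth_oldest(s, 3, bci) returns NULL.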
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = s->scope()->caller_bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = _pending_non_safepoint->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    debug_info->describe_scope(pc_offset, scope->method(), s_bci);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

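// Java calls.  On multiprocessors the call instruction is aligned first so
// it can later be patched atomically (e.g. on resolution or deoptimization).
// The static call stub, which a patched call can dispatch through to reach
// the interpreter, is emitted out of line into the stub section before the
// call itself.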
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub out of line
  emit_static_call_stub();

  switch (op->code()) {
    case lir_static_call:
      call(op->addr(), relocInfo::static_call_type, op->info());
      break;
    case lir_optvirtual_call:
      call(op->addr(), relocInfo::opt_virtual_call_type, op->info());
      break;
    case lir_icvirtual_call:
      ic_call(op->addr(), op->info());
      break;
    case lir_virtual_call:
      vtable_call(op->vtable_offset(), op->info());
      break;
    default: ShouldNotReachHere();
  }
#if defined(IA32) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // IA32 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

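    // The poll needs a distinct pc offset for its debug info; if a record
    // was already emitted at the current offset, insert a nop so the two
    // do not collide.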
    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

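// LIR_Op0s are operations without LIR operands.  The most involved one is
// lir_std_entry, which lays out the standard method prologue: align to
// CodeEntryAlignment, emit the inline cache check for instance methods
// (the unverified entry), record the verified entry point, then build the
// frame and mark it complete.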
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
    case lir_unwind:
      throw_op(op->in_opr1(), op->in_opr2(), op->info(), op->code() == lir_unwind);
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}

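// move_op dispatches on the (source, destination) operand kinds.  The
// combinations handled below are:
//
//   register -> register | stack | memory
//   stack    -> register | stack
//   constant -> register | stack | memory
//   memory   -> register
//
// Anything else (e.g. memory -> memory) is presumably split into multiple
// moves by earlier phases and never reaches this point.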
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, unaligned);

  } else {
    ShouldNotReachHere();
  }
}

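// With +VerifyOopMaps or +VerifyOops, walk the oop map attached to this
// call site and verify every location it claims holds an oop, temporarily
// forcing VerifyOops on so that the individual checks actually fire.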
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue mv = s.current();
      if (mv.is_oop()) {
        VMReg r = mv.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      s.next();
    }
    VerifyOops = v;
  }
#endif
}