comparison src/cpu/x86/vm/c1_LIRAssembler_x86.cpp @ 1216:7f8790caccb0

Merge
author apangin
date Thu, 04 Feb 2010 15:50:59 -0800
parents 6deeaebad47a 87684f1a88b5
children 7b4415a18c8a
--- src/cpu/x86/vm/c1_LIRAssembler_x86.cpp @ 1215:f19bf22685cc
+++ src/cpu/x86/vm/c1_LIRAssembler_x86.cpp @ 1216:7f8790caccb0
@@ -416,31 +416,27 @@
   // subtract two words to account for return address and link
   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
 }
 
 
-void LIR_Assembler::emit_exception_handler() {
+int LIR_Assembler::emit_exception_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
   address handler_base = __ start_a_stub(exception_handler_size);
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
 
   // if the method does not have an exception handler, then there is
   // no reason to search for one
   if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_on_exceptions()) {
     // the exception oop and pc are in rax and rdx
@@ -472,47 +468,41 @@
   }
 
   // unwind activation and forward exception to caller
   // rax: exception
   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
-
   assert(code_offset() - offset <= exception_handler_size, "overflow");
-
   __ end_a_stub();
+
+  return offset;
 }
 
-void LIR_Assembler::emit_deopt_handler() {
+
+int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for deopt handler
   address handler_base = __ start_a_stub(deopt_handler_size);
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
+
   int offset = code_offset();
-#endif // ASSERT
-
-  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
-
   InternalAddress here(__ pc());
   __ pushptr(here.addr());
-
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
-
   __ end_a_stub();
 
+  return offset;
 }
 
 
 // This is the fast version of java.lang.String.compare; it has no
 // OSR entry and therefore we generate a slow version for OSRs
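
Note on the two hunks above: emit_exception_handler() and emit_deopt_handler() now return an int, the code_offset() at which the stub begins, with -1 signalling a bailout, and they no longer call compilation()->offsets()->set_value(CodeOffsets::Exceptions / CodeOffsets::Deopt) themselves. Presumably the caller now records the returned offsets. A minimal standalone C++ sketch of that contract; emit_handler_stub and the literal sizes are toy stand-ins, not HotSpot code:

// Standalone model of the new emitter contract (toy code, not HotSpot):
// an emitter returns the offset where its stub starts, or -1 on bailout.
#include <cstdio>

static int emit_handler_stub(int space_left, int current_offset) {
  const int handler_size = 16;   // stand-in for exception_handler_size
  if (space_left < handler_size) {
    return -1;                   // mirrors: bailout(...); return -1;
  }
  int offset = current_offset;   // mirrors: int offset = code_offset();
  // ... stub instructions would be emitted here ...
  return offset;                 // mirrors: return offset;
}

int main() {
  // The caller, not the emitter, now owns the bookkeeping that the removed
  // compilation()->offsets()->set_value(CodeOffsets::Exceptions, ...) and
  // set_value(CodeOffsets::Deopt, ...) lines used to do.
  int exception_offset = emit_handler_stub(/*space_left=*/64, /*current_offset=*/128);
  if (exception_offset == -1) {
    std::puts("bailout: exception handler overflow");
    return 1;
  }
  std::printf("record CodeOffsets::Exceptions = %d\n", exception_offset);
  return 0;
}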
@@ -3217,11 +3207,10 @@
   assert(data->is_CounterData(), "need CounterData for calls");
   assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
   Register mdo = op->mdo()->as_register();
   __ movoop(mdo, md->constant_encoding());
   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
-  __ addl(counter_addr, DataLayout::counter_increment);
   Bytecodes::Code bc = method->java_code_at_bci(bci);
   // Perform additional virtual call profiling for invokevirtual and
   // invokeinterface bytecodes
   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
       Tier1ProfileVirtualCalls) {
@@ -3284,18 +3273,22 @@
         Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
         __ cmpptr(recv_addr, (int32_t)NULL_WORD);
         __ jcc(Assembler::notEqual, next_test);
         __ movptr(recv_addr, recv);
         __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
-        if (i < (VirtualCallData::row_limit() - 1)) {
-          __ jmp(update_done);
-        }
+        __ jmp(update_done);
         __ bind(next_test);
       }
+      // Receiver did not match any saved receiver and there is no empty row for it.
+      // Increment total counter to indicate polymorphic case.
+      __ addl(counter_addr, DataLayout::counter_increment);
 
       __ bind(update_done);
     }
+  } else {
+    // Static call
+    __ addl(counter_addr, DataLayout::counter_increment);
   }
 }
 
 
 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
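
Note on the profiling hunks above: the unconditional __ addl(counter_addr, DataLayout::counter_increment) at the top of the call-profiling path is gone. A call that hits or claims a receiver row now touches only that row's counter; the total counter is reserved for the polymorphic case (every row already occupied by some other receiver) and for static calls. A standalone C++ model of the new counting policy; CallProfile, profile_virtual_call, and kRowLimit are toy stand-ins, not the MethodData layout:

#include <cstdio>

// Toy model of the revised receiver-type profiling.
const int kRowLimit = 2;                 // stand-in for VirtualCallData::row_limit()

struct CallProfile {
  const char* receiver[kRowLimit] = {};  // cached receivers (nullptr == empty row);
                                         // pointer identity mirrors klass-pointer comparison
  int row_count[kRowLimit] = {};         // per-receiver counters
  int total_count = 0;                   // CounterData-style total counter
};

// After this change the total counter is touched only when no row matches
// and no empty row is left, i.e. the polymorphic case.
void profile_virtual_call(CallProfile* p, const char* recv) {
  for (int i = 0; i < kRowLimit; i++) {
    if (p->receiver[i] == recv) {        // known receiver: bump its row only
      p->row_count[i]++;
      return;
    }
    if (p->receiver[i] == nullptr) {     // empty row: claim it for this receiver
      p->receiver[i] = recv;
      p->row_count[i] = 1;               // counter_increment
      return;
    }
  }
  p->total_count++;                      // polymorphic: all rows taken, none match
}

int main() {
  CallProfile p;
  const char* a = "A"; const char* b = "B"; const char* c = "C";
  profile_virtual_call(&p, a);
  profile_virtual_call(&p, b);
  profile_virtual_call(&p, c);           // third type: lands in total_count
  std::printf("rows: A=%d B=%d, polymorphic=%d\n",
              p.row_count[0], p.row_count[1], p.total_count);
  return 0;
}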