comparison src/cpu/x86/vm/c1_LIRAssembler_x86.cpp @ 1200:ba263cfb7611

6917766: JSR 292 needs its own deopt handler
Summary: We need to introduce a new MH deopt handler so we can easily determine if the deopt happened at a MH call site or not.
Reviewed-by: never, jrose
author twisti
date Fri, 29 Jan 2010 12:13:05 +0100
parents 323bd24c6520
children 24128c2ffa87
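
Note: the two emitters below now return the start offset of their stub (-1 when it does not fit) instead of writing it into the CodeOffsets table themselves; the deleted set_value() calls are assumed to move to the shared C1 code that drives the assembler, which is not part of this comparison. A minimal sketch of that assumed caller-side pattern, reusing only identifiers visible in the deleted lines:

  // Assumed caller (shared C1 code, not shown here): record the offsets the
  // emitters now return.  -1 means the stub did not fit; bailout() has
  // already been called in that case, so there is nothing to record.
  int exception_offset = emit_exception_handler();
  if (exception_offset != -1) {
    compilation()->offsets()->set_value(CodeOffsets::Exceptions, exception_offset);
  }

  int deopt_offset = emit_deopt_handler();
  if (deopt_offset != -1) {
    compilation()->offsets()->set_value(CodeOffsets::Deopt, deopt_offset);
  }

Keeping the recording in one caller also leaves room for an additional, MethodHandle-specific deopt entry, which is what 6917766 prepares for.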
comparison of 1199:0e14bd797dad with 1200:ba263cfb7611
@@ -1,7 +1,7 @@
 /*
- * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -416,31 +416,27 @@
   // subtract two words to account for return address and link
   return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
 }
 
 
-void LIR_Assembler::emit_exception_handler() {
+int LIR_Assembler::emit_exception_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
   address handler_base = __ start_a_stub(exception_handler_size);
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
-  int offset = code_offset();
-#endif // ASSERT
 
-  compilation()->offsets()->set_value(CodeOffsets::Exceptions, code_offset());
+  int offset = code_offset();
 
   // if the method does not have an exception handler, then there is
   // no reason to search for one
   if (compilation()->has_exception_handlers() || compilation()->env()->jvmti_can_post_exceptions()) {
     // the exception oop and pc are in rax, and rdx
@@ -472,47 +468,41 @@
   }
 
   // unwind activation and forward exception to caller
   // rax,: exception
   __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
-
   assert(code_offset() - offset <= exception_handler_size, "overflow");
-
   __ end_a_stub();
-}
 
-void LIR_Assembler::emit_deopt_handler() {
+  return offset;
+}
+
+
+int LIR_Assembler::emit_deopt_handler() {
   // if the last instruction is a call (typically to do a throw which
   // is coming at the end after block reordering) the return address
   // must still point into the code area in order to avoid assertion
   // failures when searching for the corresponding bci => add a nop
   // (was bug 5/14/1999 - gri)
-
   __ nop();
 
   // generate code for exception handler
   address handler_base = __ start_a_stub(deopt_handler_size);
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
-    return;
+    return -1;
   }
-#ifdef ASSERT
-  int offset = code_offset();
-#endif // ASSERT
 
-  compilation()->offsets()->set_value(CodeOffsets::Deopt, code_offset());
-
+  int offset = code_offset();
   InternalAddress here(__ pc());
   __ pushptr(here.addr());
-
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-
   assert(code_offset() - offset <= deopt_handler_size, "overflow");
-
   __ end_a_stub();
 
+  return offset;
 }
 
 
 // This is the fast version of java.lang.String.compare; it has not
 // OSR-entry and therefore, we generate a slow version for OSR's
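
Why the handlers report their offsets at all: the deopt stub pushes its own pc as the return address before jumping to SharedRuntime::deopt_blob()->unpack(), so the address a deoptimizing frame appears to return to identifies which handler it went through. Once a separate MethodHandle deopt handler is recorded with its own offset (the follow-up this change prepares for), deciding whether a deopt happened at a MH call site reduces to comparing that pc with the recorded handler start. A standalone sketch under that assumption; the struct and field names below are illustrative, not HotSpot identifiers:

  #include <cstdint>

  // Illustrative per-nmethod bookkeeping; field names are assumptions.
  struct CompiledCode {
    uintptr_t code_begin;       // start address of the compiled code
    int       deopt_offset;     // offset returned by emit_deopt_handler()
    int       deopt_mh_offset;  // offset of the assumed MethodHandle deopt handler
  };

  // The handler begins with pushptr(<its own pc>), so a frame that deopted
  // returns to the handler's start; matching that pc against the recorded MH
  // offset answers "did this deopt happen at a MethodHandle call site?".
  static bool deopted_at_mh_call_site(const CompiledCode& code, uintptr_t return_pc) {
    return return_pc == code.code_begin + (uintptr_t) code.deopt_mh_offset;
  }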